Dataset schema (recovered from the viewer header):
  language: string, 1 distinct value
  repo: string, 346 distinct values
  path: string, lengths 6 to 201
  class_span: dict
  source: string, lengths 21 to 2.38M
  target: string, lengths 1 to 96
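Each row below repeats these six fields in order (language, repo, path, class_span, source, target). As a minimal sketch of how a row might be consumed, assuming the dump is hosted as a Hugging Face dataset (the dataset id below is hypothetical, not something the dump confirms):

```python
# Minimal sketch, assuming rows load as dicts with the six fields above.
# The dataset id "some-org/masked-class-spans" is hypothetical.
from datasets import load_dataset

ds = load_dataset("some-org/masked-class-spans", split="train")
row = ds[0]

# class_span appears to give character offsets of the class definition
# within the repository file at `path` (an inference from the field name).
print(row["repo"], row["path"], row["class_span"])
print(row["source"][:80])  # class body with the name masked as ____
print(row["target"])       # the ground-truth class name
```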
python
huggingface__transformers
src/transformers/models/mobilevitv2/modeling_mobilevitv2.py
{ "start": 29058, "end": 30039 }
class ____(nn.Module): """ DeepLabv3 architecture: https://huggingface.co/papers/1706.05587 """ def __init__(self, config: MobileViTV2Config) -> None: super().__init__() self.aspp = MobileViTV2ASPP(config) self.dropout = nn.Dropout2d(config.classifier_dropout_prob) self.classifier = MobileViTV2ConvLayer( config, in_channels=config.aspp_out_channels, out_channels=config.num_labels, kernel_size=1, use_normalization=False, use_activation=False, bias=True, ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: features = self.aspp(hidden_states[-1]) features = self.dropout(features) features = self.classifier(features) return features @auto_docstring( custom_intro=""" MobileViTV2 model with a semantic segmentation head on top, e.g. for Pascal VOC. """ )
MobileViTV2DeepLabV3
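To make the row format concrete: in each `source`, the class name is masked as `____`, and `target` holds the name. Re-inserting it recovers the original definition, as this small sketch shows (the two literals are abbreviated stand-ins for the first row above, not the full payload):

```python
# Minimal sketch: restoring a masked row by filling ____ with the target.
source = 'class ____(nn.Module):\n    """DeepLabv3 architecture."""'
target = "MobileViTV2DeepLabV3"

restored = source.replace("____", target, 1)
assert restored.startswith("class MobileViTV2DeepLabV3(nn.Module):")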
python
openai__openai-python
src/openai/types/responses/response_reasoning_summary_text_delta_event.py
{ "start": 215, "end": 846 }
class ____(BaseModel): delta: str """The text delta that was added to the summary.""" item_id: str """The ID of the item this summary text delta is associated with.""" output_index: int """The index of the output item this summary text delta is associated with.""" sequence_number: int """The sequence number of this event.""" summary_index: int """The index of the summary part within the reasoning summary.""" type: Literal["response.reasoning_summary_text.delta"] """The type of the event. Always `response.reasoning_summary_text.delta`."""
ResponseReasoningSummaryTextDeltaEvent
python
Netflix__metaflow
metaflow/sidecar/sidecar_messages.py
{ "start": 471, "end": 561 }
class ____(object): INVALID, MUST_SEND, BEST_EFFORT, SHUTDOWN = range(1, 5)
MessageTypes
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pylint/non_slot_assignment.py
{ "start": 1421, "end": 1639 }
class ____: __slots__ = ("bar",) def __init__(self): self.qux = 2 @property def qux(self): return self.bar * 2 @qux.setter def qux(self, value): self.bar = value / 2
Foo
python
networkx__networkx
networkx/utils/misc.py
{ "start": 11045, "end": 22844 }
class ____: """PythonRandomInterface is included for backward compatibility New code should use PythonRandomViaNumpyBits instead. """ def __init__(self, rng=None): try: import numpy as np except ImportError: msg = "numpy not found, only random.random available." warnings.warn(msg, ImportWarning) if rng is None: self._rng = np.random.mtrand._rand else: self._rng = rng def random(self): return self._rng.random() def uniform(self, a, b): return a + (b - a) * self._rng.random() def randrange(self, a, b=None): import numpy as np if b is None: a, b = 0, a if b > 9223372036854775807: # from np.iinfo(np.int64).max tmp_rng = PythonRandomViaNumpyBits(self._rng) return tmp_rng.randrange(a, b) if isinstance(self._rng, np.random.Generator): return self._rng.integers(a, b) return self._rng.randint(a, b) # NOTE: the numpy implementations of `choice` don't support strings, so # this cannot be replaced with self._rng.choice def choice(self, seq): import numpy as np if isinstance(self._rng, np.random.Generator): idx = self._rng.integers(0, len(seq)) else: idx = self._rng.randint(0, len(seq)) return seq[idx] def gauss(self, mu, sigma): return self._rng.normal(mu, sigma) def shuffle(self, seq): return self._rng.shuffle(seq) # Some methods don't match API for numpy RandomState. # Commented out versions are not used by NetworkX def sample(self, seq, k): return self._rng.choice(list(seq), size=(k,), replace=False) def randint(self, a, b): import numpy as np if b > 9223372036854775807: # from np.iinfo(np.int64).max tmp_rng = PythonRandomViaNumpyBits(self._rng) return tmp_rng.randint(a, b) if isinstance(self._rng, np.random.Generator): return self._rng.integers(a, b + 1) return self._rng.randint(a, b + 1) # exponential as expovariate with 1/argument, def expovariate(self, scale): return self._rng.exponential(1 / scale) # pareto as paretovariate with argument, def paretovariate(self, shape): return self._rng.pareto(shape) # weibull as weibullvariate multiplied by beta, # def weibullvariate(self, alpha, beta): # return self._rng.weibull(alpha) * beta # # def triangular(self, low, high, mode): # return self._rng.triangular(low, mode, high) # # def choices(self, seq, weights=None, cum_weights=None, k=1): # return self._rng.choice(seq def create_py_random_state(random_state=None): """Returns a random.Random instance depending on input. Parameters ---------- random_state : int or random number generator or None (default=None) - If int, return a `random.Random` instance set with seed=int. - If `random.Random` instance, return it. - If None or the `np.random` package, return the global random number generator used by `np.random`. - If an `np.random.Generator` instance, or the `np.random` package, or the global numpy random number generator, then return it. wrapped in a `PythonRandomViaNumpyBits` class. - If a `PythonRandomViaNumpyBits` instance, return it. - If a `PythonRandomInterface` instance, return it. - If a `np.random.RandomState` instance and not the global numpy default, return it wrapped in `PythonRandomInterface` for backward bit-stream matching with legacy code. Notes ----- - A diagram intending to illustrate the relationships behind our support for numpy random numbers is called `NetworkX Numpy Random Numbers <https://excalidraw.com/#room=b5303f2b03d3af7ccc6a,e5ZDIWdWWCTTsg8OqoRvPA>`_. - More discussion about this support also appears in `gh-6869#comment <https://github.com/networkx/networkx/pull/6869#issuecomment-1944799534>`_. 
- Wrappers of numpy.random number generators allow them to mimic the Python random number generation algorithms. For example, Python can create arbitrarily large random ints, and the wrappers use Numpy bit-streams with CPython's random module to choose arbitrarily large random integers too. - We provide two wrapper classes: `PythonRandomViaNumpyBits` is usually what you want and is always used for `np.Generator` instances. But for users who need to recreate random numbers produced in NetworkX 3.2 or earlier, we maintain the `PythonRandomInterface` wrapper as well. We use it only used if passed a (non-default) `np.RandomState` instance pre-initialized from a seed. Otherwise the newer wrapper is used. """ if random_state is None or random_state is random: return random._inst if isinstance(random_state, random.Random): return random_state if isinstance(random_state, int): return random.Random(random_state) try: import numpy as np except ImportError: pass else: if isinstance(random_state, PythonRandomInterface | PythonRandomViaNumpyBits): return random_state if isinstance(random_state, np.random.Generator): return PythonRandomViaNumpyBits(random_state) if random_state is np.random: return PythonRandomViaNumpyBits(np.random.mtrand._rand) if isinstance(random_state, np.random.RandomState): if random_state is np.random.mtrand._rand: return PythonRandomViaNumpyBits(random_state) # Only need older interface if specially constructed RandomState used return PythonRandomInterface(random_state) msg = f"{random_state} cannot be used to generate a random.Random instance" raise ValueError(msg) def nodes_equal(nodes1, nodes2): """Check if nodes are equal. Equality here means equal as Python objects. Node data must match if included. The order of nodes is not relevant. Parameters ---------- nodes1, nodes2 : iterables of nodes, or (node, datadict) tuples Returns ------- bool True if nodes are equal, False otherwise. """ nlist1 = list(nodes1) nlist2 = list(nodes2) try: d1 = dict(nlist1) d2 = dict(nlist2) except (ValueError, TypeError): d1 = dict.fromkeys(nlist1) d2 = dict.fromkeys(nlist2) return d1 == d2 def edges_equal(edges1, edges2, *, directed=False): """Return whether edgelists are equal. Equality here means equal as Python objects. Edge data must match if included. Ordering of edges in an edgelist is not relevant; ordering of nodes in an edge is only relevant if ``directed == True``. Parameters ---------- edges1, edges2 : iterables of tuples Each tuple can be an edge tuple ``(u, v)``, or an edge tuple with data `dict` s ``(u, v, d)``, or an edge tuple with keys and data `dict` s ``(u, v, k, d)``. directed : bool, optional (default=False) If `True`, edgelists are treated as coming from directed graphs. Returns ------- bool `True` if edgelists are equal, `False` otherwise. Examples -------- >>> G1 = nx.complete_graph(3) >>> G2 = nx.cycle_graph(3) >>> edges_equal(G1.edges, G2.edges) True Edge order is not taken into account: >>> G1 = nx.Graph([(0, 1), (1, 2)]) >>> G2 = nx.Graph([(1, 2), (0, 1)]) >>> edges_equal(G1.edges, G2.edges) True The `directed` parameter controls whether edges are treated as coming from directed graphs. >>> DG1 = nx.DiGraph([(0, 1)]) >>> DG2 = nx.DiGraph([(1, 0)]) >>> edges_equal(DG1.edges, DG2.edges, directed=False) # Not recommended. True >>> edges_equal(DG1.edges, DG2.edges, directed=True) False This function is meant to be used on edgelists (i.e. 
the output of a ``G.edges()`` call), and can give unexpected results on unprocessed lists of edges: >>> l1 = [(0, 1)] >>> l2 = [(0, 1), (1, 0)] >>> edges_equal(l1, l2) # Not recommended. False >>> G1 = nx.Graph(l1) >>> G2 = nx.Graph(l2) >>> edges_equal(G1.edges, G2.edges) True >>> DG1 = nx.DiGraph(l1) >>> DG2 = nx.DiGraph(l2) >>> edges_equal(DG1.edges, DG2.edges, directed=True) False """ d1 = defaultdict(list) d2 = defaultdict(list) for e1, e2 in zip_longest(edges1, edges2, fillvalue=None): if e1 is None or e2 is None: return False # One is longer. for e, d in [(e1, d1), (e2, d2)]: u, v, *data = e d[u, v].append(data) if not directed: d[v, u].append(data) # Can check one direction because lengths are the same. return all(d1[e].count(data) == d2[e].count(data) for e in d1 for data in d1[e]) def graphs_equal(graph1, graph2): """Check if graphs are equal. Equality here means equal as Python objects (not isomorphism). Node, edge and graph data must match. Parameters ---------- graph1, graph2 : graph Returns ------- bool True if graphs are equal, False otherwise. """ return ( graph1.adj == graph2.adj and graph1.nodes == graph2.nodes and graph1.graph == graph2.graph ) def _clear_cache(G): """Clear the cache of a graph (currently stores converted graphs). Caching is controlled via ``nx.config.cache_converted_graphs`` configuration. """ if cache := getattr(G, "__networkx_cache__", None): cache.clear() def check_create_using(create_using, *, directed=None, multigraph=None, default=None): """Assert that create_using has good properties This checks for desired directedness and multi-edge properties. It returns `create_using` unless that is `None` when it returns the optionally specified default value. Parameters ---------- create_using : None, graph class or instance The input value of create_using for a function. directed : None or bool Whether to check `create_using.is_directed() == directed`. If None, do not assert directedness. multigraph : None or bool Whether to check `create_using.is_multigraph() == multigraph`. If None, do not assert multi-edge property. default : None or graph class The graph class to return if create_using is None. Returns ------- create_using : graph class or instance The provided graph class or instance, or if None, the `default` value. Raises ------ NetworkXError When `create_using` doesn't match the properties specified by `directed` or `multigraph` parameters. """ if default is None: default = nx.Graph G = create_using if create_using is not None else default G_directed = G.is_directed(None) if isinstance(G, type) else G.is_directed() G_multigraph = G.is_multigraph(None) if isinstance(G, type) else G.is_multigraph() if directed is not None: if directed and not G_directed: raise nx.NetworkXError("create_using must be directed") if not directed and G_directed: raise nx.NetworkXError("create_using must not be directed") if multigraph is not None: if multigraph and not G_multigraph: raise nx.NetworkXError("create_using must be a multi-graph") if not multigraph and G_multigraph: raise nx.NetworkXError("create_using must not be a multi-graph") return G
PythonRandomInterface
python
pydata__xarray
xarray/tests/test_backends.py
{ "start": 256103, "end": 259368 }
class ____: def convert_to_pydap_dataset(self, original): from pydap.model import BaseType, DatasetType ds = DatasetType("bears", **original.attrs) for key, var in original.data_vars.items(): ds[key] = BaseType( key, var.values, dtype=var.values.dtype.kind, dims=var.dims, **var.attrs ) # check all dims are stored in ds for d in original.coords: ds[d] = BaseType(d, original[d].values, dims=(d,), **original[d].attrs) return ds @contextlib.contextmanager def create_datasets(self, **kwargs): with open_example_dataset("bears.nc") as expected: # print("QQ0:", expected["bears"].load()) pydap_ds = self.convert_to_pydap_dataset(expected) actual = open_dataset(PydapDataStore(pydap_ds)) # netcdf converts string to byte not unicode # fixed in pydap 3.5.6. https://github.com/pydap/pydap/issues/510 actual["bears"].values = actual["bears"].values.astype("S") yield actual, expected def test_cmp_local_file(self) -> None: with self.create_datasets() as (actual, expected): assert_equal(actual, expected) # global attributes should be global attributes on the dataset assert "NC_GLOBAL" not in actual.attrs assert "history" in actual.attrs # we don't check attributes exactly with assertDatasetIdentical() # because the test DAP server seems to insert some extra # attributes not found in the netCDF file. assert actual.attrs.keys() == expected.attrs.keys() with self.create_datasets() as (actual, expected): assert_equal(actual[{"l": 2}], expected[{"l": 2}]) with self.create_datasets() as (actual, expected): # always return arrays and not scalars # scalars will be promoted to unicode for numpy >= 2.3.0 assert_equal(actual.isel(i=[0], j=[-1]), expected.isel(i=[0], j=[-1])) with self.create_datasets() as (actual, expected): assert_equal(actual.isel(j=slice(1, 2)), expected.isel(j=slice(1, 2))) with self.create_datasets() as (actual, expected): indexers = {"i": [1, 0, 0], "j": [1, 2, 0, 1]} assert_equal(actual.isel(**indexers), expected.isel(**indexers)) with self.create_datasets() as (actual, expected): indexers2 = { "i": DataArray([0, 1, 0], dims="a"), "j": DataArray([0, 2, 1], dims="a"), } assert_equal(actual.isel(**indexers2), expected.isel(**indexers2)) def test_compatible_to_netcdf(self) -> None: # make sure it can be saved as a netcdf with self.create_datasets() as (actual, expected): with create_tmp_file() as tmp_file: actual.to_netcdf(tmp_file) with open_dataset(tmp_file) as actual2: assert_equal(actual2, expected) @requires_dask def test_dask(self) -> None: with self.create_datasets(chunks={"j": 2}) as (actual, expected): assert_equal(actual, expected) @network @requires_scipy_or_netCDF4 @requires_pydap
TestPydap
python
redis__redis-py
tests/test_asyncio/test_connection_pool.py
{ "start": 18382, "end": 19297 }
class ____: def test_extra_typed_querystring_options(self): pool = redis.BlockingConnectionPool.from_url( "redis://localhost/2?socket_timeout=20&socket_connect_timeout=10" "&socket_keepalive=&retry_on_timeout=Yes&max_connections=10&timeout=13.37" ) assert pool.connection_class == redis.Connection assert pool.connection_kwargs == { "host": "localhost", "db": 2, "socket_timeout": 20.0, "socket_connect_timeout": 10.0, "retry_on_timeout": True, } assert pool.max_connections == 10 assert pool.timeout == 13.37 def test_invalid_extra_typed_querystring_options(self): with pytest.raises(ValueError): redis.BlockingConnectionPool.from_url( "redis://localhost/2?timeout=_not_a_float_" )
TestBlockingConnectionPoolURLParsing
python
django-guardian__django-guardian
guardian/testapp/models.py
{ "start": 3902, "end": 3998 }
class ____(models.Model): created_on = models.DateTimeField(auto_now_add=True)
ParentTestModel
python
Textualize__textual
docs/examples/how-to/center09.py
{ "start": 80, "end": 525 }
class ____(App): """How to center things.""" CSS = """ Screen { align: center middle; } .words { background: blue 50%; border: wide white; width: auto; } """ def compose(self) -> ComposeResult: yield Static("How about a nice game", classes="words") yield Static("of chess?", classes="words") if __name__ == "__main__": app = CenterApp() app.run()
CenterApp
python
graphql-python__graphene
examples/complex_example.py
{ "start": 478, "end": 677 }
class ____(graphene.Mutation): class Arguments: geo = GeoInput(required=True) Output = Address def mutate(root, info, geo): return Address(latlng=geo.latlng)
CreateAddress
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocolModule4.py
{ "start": 321, "end": 505 }
class ____(Protocol[Z]): x: FnHandler[Z] m1: ModuleSpec[int] = protocolModule3 m1.x(lambda y: None) # This should generate an error. m2: ModuleSpec[str] = protocolModule3
ModuleSpec
python
sqlalchemy__sqlalchemy
test/orm/declarative/test_dc_transforms.py
{ "start": 33464, "end": 39049 }
class ____(fixtures.TestBase): @testing.variation("collection_type", ["list", "set", "list_set_mismatch"]) def test_no_funny_business( self, dc_decl_base: Type[MappedAsDataclass], collection_type: testing.Variation, ): if collection_type.list: expected = "list" else: expected = "set" with expect_raises_message( exc.ArgumentError, f"For relationship A.bs using dataclass options, " f"default_factory must be exactly <class '{expected}'>", ): class A(dc_decl_base): __tablename__ = "a" id: Mapped[int] = mapped_column(primary_key=True, init=False) if collection_type.list: bs: Mapped[List["B"]] = relationship( # noqa: F821 default_factory=lambda: [B(data="hi")] # noqa: F821 ) elif collection_type.set: bs: Mapped[Set["B"]] = relationship( # noqa: F821 default_factory=lambda: {B(data="hi")} # noqa: F821 ) elif collection_type.list_set_mismatch: bs: Mapped[Set["B"]] = relationship( # noqa: F821 default_factory=list ) else: collection_type.fail() def test_one_to_one_example(self, dc_decl_base: Type[MappedAsDataclass]): """test example in the relationship docs will derive uselist=False correctly""" class Parent(dc_decl_base): __tablename__ = "parent" id: Mapped[int] = mapped_column(init=False, primary_key=True) child: Mapped["Child"] = relationship( # noqa: F821 back_populates="parent", default=None ) class Child(dc_decl_base): __tablename__ = "child" id: Mapped[int] = mapped_column(init=False, primary_key=True) parent_id: Mapped[int] = mapped_column( ForeignKey("parent.id"), init=False ) parent: Mapped["Parent"] = relationship( back_populates="child", default=None ) c1 = Child() p1 = Parent(child=c1) is_(p1.child, c1) is_(c1.parent, p1) p2 = Parent() is_(p2.child, None) def test_replace_operation_works_w_history_etc( self, registry: _RegistryType ): @mapped_as_dataclass(registry) class A: __tablename__ = "a" id: Mapped[int] = mapped_column(primary_key=True, init=False) data: Mapped[str] x: Mapped[Optional[int]] = mapped_column(default=None) bs: Mapped[List["B"]] = relationship( # noqa: F821 default_factory=list ) @mapped_as_dataclass(registry) class B: __tablename__ = "b" id: Mapped[int] = mapped_column(primary_key=True, init=False) a_id = mapped_column(ForeignKey("a.id"), init=False) data: Mapped[str] x: Mapped[Optional[int]] = mapped_column(default=None) registry.metadata.create_all(testing.db) with Session(testing.db) as sess: a1 = A("data", 10, [B("b1"), B("b2", x=5), B("b3")]) sess.add(a1) sess.commit() a2 = dataclasses.replace(a1, x=12, bs=[B("b4")]) assert a1 in sess assert not sess.is_modified(a1, include_collections=True) assert a2 not in sess eq_(inspect(a2).attrs.x.history, ([12], (), ())) sess.add(a2) sess.commit() eq_(sess.scalars(select(A.x).order_by(A.id)).all(), [10, 12]) eq_( sess.scalars(select(B.data).order_by(B.id)).all(), ["b1", "b2", "b3", "b4"], ) def test_post_init(self, registry: _RegistryType): @registry.mapped_as_dataclass class A: __tablename__ = "a" id: Mapped[int] = mapped_column(primary_key=True, init=False) data: Mapped[str] = mapped_column(init=False) def __post_init__(self): self.data = "some data" a1 = A() eq_(a1.data, "some data") def test_no_field_args_w_new_style(self, registry: _RegistryType): with expect_raises_message( exc.InvalidRequestError, "SQLAlchemy mapped dataclasses can't consume mapping information", ): @registry.mapped_as_dataclass() class A: __tablename__ = "a" __sa_dataclass_metadata_key__ = "sa" account_id: int = dataclasses.field( init=False, metadata={"sa": Column(Integer, primary_key=True)}, ) def test_no_field_args_w_new_style_two(self, 
registry: _RegistryType): @dataclasses.dataclass class Base: pass with expect_raises_message( exc.InvalidRequestError, "SQLAlchemy mapped dataclasses can't consume mapping information", ): @registry.mapped_as_dataclass() class A(Base): __tablename__ = "a" __sa_dataclass_metadata_key__ = "sa" account_id: int = dataclasses.field( init=False, metadata={"sa": Column(Integer, primary_key=True)}, )
RelationshipDefaultFactoryTest
python
PyCQA__pylint
tests/functional/u/useless/useless_parent_delegation.py
{ "start": 13710, "end": 13836 }
class ____: def useless(self, *, first): # [useless-parent-delegation] super().useless(first=first)
UselessSuperPy3
python
sqlalchemy__sqlalchemy
test/sql/test_labels.py
{ "start": 27183, "end": 40057 }
class ____(fixtures.TestBase, AssertsCompiledSQL): """Test the :class:`.WrapsColumnExpression` mixin, which provides auto-labels that match a named expression """ __dialect__ = "default" table1 = table("some_table", column("name"), column("value")) def _fixture(self): class SomeColThing(WrapsColumnExpression, ColumnElement): inherit_cache = False def __init__(self, expression): self.clause = coercions.expect( roles.ExpressionElementRole, expression ) @property def wrapped_column_expression(self): return self.clause @compiles(SomeColThing) def process(element, compiler, **kw): return "SOME_COL_THING(%s)" % compiler.process( element.clause, **kw ) return SomeColThing @testing.fixture def compiler_column_fixture(self): return self._fixture() @testing.fixture def column_expression_fixture(self): class MyString(TypeEngine): def column_expression(self, column): return func.lower(column) return table( "some_table", column("name", String), column("value", MyString) ) def test_plain_select_compiler_expression(self, compiler_column_fixture): expr = compiler_column_fixture table1 = self.table1 self.assert_compile( select( table1.c.name, expr(table1.c.value), ), "SELECT some_table.name, SOME_COL_THING(some_table.value) " "AS value FROM some_table", ) def test_plain_select_column_expression(self, column_expression_fixture): table1 = column_expression_fixture self.assert_compile( select(table1), "SELECT some_table.name, lower(some_table.value) AS value " "FROM some_table", ) def test_plain_returning_compiler_expression( self, compiler_column_fixture ): expr = compiler_column_fixture table1 = self.table1 self.assert_compile( insert(table1).returning( table1.c.name, expr(table1.c.value), ), "INSERT INTO some_table (name, value) VALUES (:name, :value) " "RETURNING some_table.name, " "SOME_COL_THING(some_table.value) AS value", ) @testing.combinations("columns", "table", argnames="use_columns") def test_plain_returning_column_expression( self, column_expression_fixture, use_columns ): table1 = column_expression_fixture if use_columns == "columns": stmt = insert(table1).returning(table1) else: stmt = insert(table1).returning(table1.c.name, table1.c.value) self.assert_compile( stmt, "INSERT INTO some_table (name, value) VALUES (:name, :value) " "RETURNING some_table.name, lower(some_table.value) AS value", ) def test_select_dupes_column_expression(self, column_expression_fixture): table1 = column_expression_fixture self.assert_compile( select(table1.c.name, table1.c.value, table1.c.value), "SELECT some_table.name, lower(some_table.value) AS value, " "lower(some_table.value) AS value__1 FROM some_table", ) def test_returning_dupes_column_expression( self, column_expression_fixture ): table1 = column_expression_fixture stmt = insert(table1).returning( table1.c.name, table1.c.value, table1.c.value ) self.assert_compile( stmt, "INSERT INTO some_table (name, value) VALUES (:name, :value) " "RETURNING some_table.name, lower(some_table.value) AS value, " "lower(some_table.value) AS value__1", ) def test_column_auto_label_dupes_label_style_none(self): expr = self._fixture() table1 = self.table1 self.assert_compile( select( table1.c.name, table1.c.name, expr(table1.c.name), expr(table1.c.name), ).set_label_style(LABEL_STYLE_NONE), "SELECT some_table.name, some_table.name, " "SOME_COL_THING(some_table.name) AS name, " "SOME_COL_THING(some_table.name) AS name FROM some_table", ) def test_column_auto_label_dupes_label_style_disambiguate(self): expr = self._fixture() table1 = self.table1 self.assert_compile( select( 
table1.c.name, table1.c.name, expr(table1.c.name), expr(table1.c.name), ), "SELECT some_table.name, some_table.name AS name__1, " "SOME_COL_THING(some_table.name) AS name__2, " "SOME_COL_THING(some_table.name) AS name__3 " "FROM some_table", ) def test_anon_expression_fallback(self): expr = self._fixture() table1 = self.table1 self.assert_compile( select(table1.c.name + "foo", expr(table1.c.name + "foo")), "SELECT some_table.name || :name_1 AS anon_1, " "SOME_COL_THING(some_table.name || :name_2) AS anon_2 " "FROM some_table", ) def test_anon_expression_fallback_use_labels(self): expr = self._fixture() table1 = self.table1 self.assert_compile( select( table1.c.name + "foo", expr(table1.c.name + "foo") ).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL), "SELECT some_table.name || :name_1 AS anon_1, " "SOME_COL_THING(some_table.name || :name_2) AS anon_2 " "FROM some_table", ) def test_label_auto_label(self): expr = self._fixture() table1 = self.table1 self.assert_compile( select( expr(table1.c.name.label("foo")), table1.c.name.label("bar"), table1.c.value, ), "SELECT SOME_COL_THING(some_table.name) AS foo, " "some_table.name AS bar, some_table.value FROM some_table", ) def test_cast_auto_label_label_style_none(self): table1 = self.table1 self.assert_compile( select( cast(table1.c.name, Integer), cast(table1.c.name, String), table1.c.name, ).set_label_style(LABEL_STYLE_NONE), "SELECT CAST(some_table.name AS INTEGER) AS name, " "CAST(some_table.name AS VARCHAR) AS name, " "some_table.name FROM some_table", ) def test_cast_auto_label_label_style_disabmiguate(self): table1 = self.table1 self.assert_compile( select( cast(table1.c.name, Integer), cast(table1.c.name, String), table1.c.name, ), "SELECT CAST(some_table.name AS INTEGER) AS name, " "CAST(some_table.name AS VARCHAR) AS name__1, " "some_table.name AS name_1 FROM some_table", ) def test_type_coerce_auto_label_label_style_none(self): table1 = self.table1 self.assert_compile( select( type_coerce(table1.c.name, Integer), type_coerce(table1.c.name, String), table1.c.name, ).set_label_style(LABEL_STYLE_NONE), # ideally type_coerce wouldn't label at all... "SELECT some_table.name AS name, " "some_table.name AS name, " "some_table.name FROM some_table", ) @testing.combinations("inside", "outside") def test_wraps_col_expr_label_propagate(self, cast_location): """test #8084""" table1 = self.table1 if cast_location == "inside": expr = cast(table1.c.name, Integer).label("foo") elif cast_location == "outside": expr = cast(table1.c.name.label("foo"), Integer) else: assert False self.assert_compile( select(expr), "SELECT CAST(some_table.name AS INTEGER) AS foo FROM some_table", ) is_(select(expr).selected_columns.foo, expr) subq = select(expr).subquery() self.assert_compile( select(subq).where(subq.c.foo == 10), "SELECT anon_1.foo FROM (SELECT CAST(some_table.name AS INTEGER) " "AS foo FROM some_table) AS anon_1 WHERE anon_1.foo = :foo_1", checkparams={"foo_1": 10}, ) def test_type_coerce_auto_label_label_style_disambiguate(self): table1 = self.table1 self.assert_compile( select( type_coerce(table1.c.name, Integer), type_coerce(table1.c.name, String), table1.c.name, ), # ideally type_coerce wouldn't label at all... 
"SELECT some_table.name AS name, " "some_table.name AS name__1, " "some_table.name AS name_1 FROM some_table", ) @testing.variation("native_boolean", [True, False]) def test_boolean_auto_label(self, native_boolean): col = column("value", Boolean) if native_boolean: self.assert_compile( select(~col, col), "SELECT NOT value, value", supports_native_boolean=True, use_default_dialect=True, ) else: self.assert_compile( select(~col, col), # not sure if this SQL is right but this is what it was # before the new labeling, just different label name "SELECT value = 0 AS value, value", ) def test_label_auto_label_use_labels(self): expr = self._fixture() table1 = self.table1 self.assert_compile( select( expr(table1.c.name.label("foo")), table1.c.name.label("bar"), table1.c.value, ).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL), # the expr around label is treated the same way as plain column # with label "SELECT SOME_COL_THING(some_table.name) AS foo, " "some_table.name AS bar, " "some_table.value AS some_table_value FROM some_table", ) def test_column_auto_label_dupes_use_labels(self): expr = self._fixture() table1 = self.table1 self.assert_compile( select( table1.c.name, table1.c.name, expr(table1.c.name), expr(table1.c.name), ).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL), "SELECT some_table.name AS some_table_name, " "some_table.name AS some_table_name__1, " "SOME_COL_THING(some_table.name) AS some_table_name_1, " "SOME_COL_THING(some_table.name) AS some_table_name_2 " "FROM some_table", ) def test_column_auto_label_use_labels(self): expr = self._fixture() table1 = self.table1 self.assert_compile( select(table1.c.name, expr(table1.c.value)).set_label_style( LABEL_STYLE_TABLENAME_PLUS_COL ), "SELECT some_table.name AS some_table_name, " "SOME_COL_THING(some_table.value) " "AS some_table_value FROM some_table", ) @testing.combinations( # the resulting strings are completely arbitrary and are not # exposed in SQL with current implementations. we want to # only assert that the operation doesn't fail. It's safe to # change the assertion cases for this test if the label escaping # format changes (literal_column("'(1,2]'"), "'_1,2]'_1"), (literal_column("))"), "__1"), (literal_column("'%('"), "'_'_1"), ) def test_labels_w_strformat_chars_in_isolation(self, test_case, expected): """test #8724""" pa = prefix_anon_map() eq_(test_case._anon_key_label % pa, expected) @testing.combinations( ( select(literal_column("'(1,2]'"), literal_column("'(1,2]'")), "SELECT '(1,2]', '(1,2]'", ), (select(literal_column("))"), literal_column("))")), "SELECT )), ))"), ( select(literal_column("'%('"), literal_column("'%('")), "SELECT '%(', '%('", ), ) def test_labels_w_strformat_chars_in_statements(self, test_case, expected): """test #8724""" self.assert_compile(test_case, expected)
ColExprLabelTest
python
openai__openai-python
src/openai/types/audio/transcription_create_params.py
{ "start": 4662, "end": 5502 }
class ____(TypedDict, total=False): type: Required[Literal["server_vad"]] """Must be set to `server_vad` to enable manual chunking using server side VAD.""" prefix_padding_ms: int """Amount of audio to include before the VAD detected speech (in milliseconds).""" silence_duration_ms: int """ Duration of silence to detect speech stop (in milliseconds). With shorter values the model will respond more quickly, but may jump in on short pauses from the user. """ threshold: float """Sensitivity threshold (0.0 to 1.0) for voice activity detection. A higher threshold will require louder audio to activate the model, and thus might perform better in noisy environments. """ ChunkingStrategy: TypeAlias = Union[Literal["auto"], ChunkingStrategyVadConfig]
ChunkingStrategyVadConfig
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/linalg/linalg_ops_test.py
{ "start": 14004, "end": 14188 }
class ____(test.TestCase, _PinvTest): dtype = np.float32 use_static_shape = False use_default_rcond = True @test_util.run_all_in_graph_and_eager_modes
PinvTestDynamic32DefaultRcond
python
google__jax
tests/blocked_sampler_test.py
{ "start": 3003, "end": 4917 }
class ____(jtu.JaxTestCase): @parameterized.named_parameters( dict(testcase_name='8x128_vs_16x256', total_size=(32, 256), block_size_a=(8, 128), block_size_b=(16, 256), tile_size=(8, 128), transpose_grid=False), dict(testcase_name='transpose_8x128_vs_16x256', total_size=(32, 256), block_size_a=(8, 128), block_size_b=(16, 256), tile_size=(8, 128), transpose_grid=True), dict(testcase_name='8x128_vs_32x128', total_size=(32, 128), block_size_a=(8, 128), block_size_b=(32, 128), tile_size=(8, 128), transpose_grid=False), dict(testcase_name='16x256_vs_32x128', total_size=(32, 256), block_size_a=(16, 256), block_size_b=(32, 128), tile_size=(8, 128), transpose_grid=False), dict(testcase_name='128x128_vs_128x256_padding', total_size=(256, 128), block_size_a=(128, 128), block_size_b=(128, 256), tile_size=(128, 128), transpose_grid=False), dict(testcase_name='128x128_vs_128x256_padding2', total_size=(257, 129), block_size_a=(128, 128), block_size_b=(128, 256), tile_size=(128, 128), transpose_grid=False), ) def test_block_shape_invariance(self, total_size, block_size_a, block_size_b, tile_size, transpose_grid): global_key = jax.random.key(0) ceil_div = lambda x, y: (x + y - 1) // y grid_a = tuple(ceil_div(_tot, _blk) for _tot, _blk in zip(total_size, block_size_a)) result_a = call_kernel( uniform_kernel, grid_a, transpose_grid, global_key, total_size, block_size_a, tile_size) grid_b = tuple(ceil_div(_tot, _blk) for _tot, _blk in zip(total_size, block_size_b)) result_b = call_kernel( uniform_kernel, grid_b, transpose_grid, global_key, total_size, block_size_b, tile_size) np.testing.assert_array_equal(result_a, result_b)
BlockedSamplerTest
python
scipy__scipy
scipy/special/tests/test_ndtri_exp.py
{ "start": 428, "end": 3708 }
class ____: """Tests that ndtri_exp is sufficiently close to an inverse of log_ndtr. We have separate tests for the five intervals (-inf, -10), [-10, -2), [-2, -0.14542), [-0.14542, -1e-6), and [-1e-6, 0). ndtri_exp(y) is computed in three different ways depending on if y is in (-inf, -2), [-2, log(1 - exp(-2))], or [log(1 - exp(-2), 0). Each of these intervals is given its own test with two additional tests for handling very small values and values very close to zero. """ @pytest.mark.parametrize( "test_input", [-1e1, -1e2, -1e10, -1e20, -np.finfo(float).max] ) def test_very_small_arg(self, test_input, uniform_random_points): scale = test_input points = scale * (0.5 * uniform_random_points + 0.5) assert_func_equal( log_ndtr_ndtri_exp, lambda y: y, points, rtol=1e-14, nan_ok=True ) @pytest.mark.parametrize( "interval,expected_rtol", [ ((-10, -2), 1e-14), ((-2, -0.14542), 1e-12), ((-0.14542, -1e-6), 1e-10), ((-1e-6, 0), 1e-6), ], ) def test_in_interval(self, interval, expected_rtol, uniform_random_points): left, right = interval points = (right - left) * uniform_random_points + left assert_func_equal( log_ndtr_ndtri_exp, lambda y: y, points, rtol=expected_rtol, nan_ok=True ) def test_extreme(self): # bigneg is not quite the largest negative double precision value. # Here's why: # The round-trip calculation # y = ndtri_exp(bigneg) # bigneg2 = log_ndtr(y) # where bigneg is a very large negative value, would--with infinite # precision--result in bigneg2 == bigneg. When bigneg is large enough, # y is effectively equal to -sqrt(2)*sqrt(-bigneg), and log_ndtr(y) is # effectively -(y/sqrt(2))**2. If we use bigneg = np.finfo(float).min, # then by construction, the theoretical value is the most negative # finite value that can be represented with 64 bit float point. This # means tiny changes in how the computation proceeds can result in the # return value being -inf. (E.g. changing the constant representation # of 1/sqrt(2) from 0.7071067811865475--which is the value returned by # 1/np.sqrt(2)--to 0.7071067811865476--which is the most accurate 64 # bit floating point representation of 1/sqrt(2)--results in the # round-trip that starts with np.finfo(float).min returning -inf. So # we'll move the bigneg value a few ULPs towards 0 to avoid this # sensitivity. # Use the reduce method to apply nextafter four times. bigneg = np.nextafter.reduce([np.finfo(float).min, 0, 0, 0, 0]) # tinyneg is approx. -2.225e-308. tinyneg = -np.finfo(float).tiny x = np.array([tinyneg, bigneg]) result = log_ndtr_ndtri_exp(x) assert_allclose(result, x, rtol=1e-12) def test_asymptotes(self): assert_equal(ndtri_exp([-np.inf, 0.0]), [-np.inf, np.inf]) def test_outside_domain(self): assert np.isnan(ndtri_exp(1.0))
TestNdtriExp
python
getsentry__sentry
src/sentry/integrations/slack/threads/activity_notifications.py
{ "start": 335, "end": 4417 }
class ____: """ Class responsible for helping derive data from a specific activity type """ _NO_PROVIDER_KEY_METRICS = "sentry.integrations.slack.tasks.activity_notifications.external_issue_created_activity.missing_provider" _NO_LINK_KEY_METRICS = "sentry.integrations.slack.tasks.activity_notifications.external_issue_created_activity.missing_link" _NO_LABEL_KEY_METRICS = "sentry.integrations.slack.tasks.activity_notifications.external_issue_created_activity.missing_label" DEFAULT_PROVIDER_FALLBACK_TEXT = "external provider" _PROVIDER_KEY = "provider" _TICKET_KEY = "label" _URL_KEY = "location" def __init__(self, activity: Activity) -> None: try: activity_type: ActivityType = ActivityType(activity.type) except ValueError as err: _default_logger.info( "there was an error trying to get activity type, assuming activity is unsupported", exc_info=err, extra={ "error": str(err), "activity_id": activity.id, "activity_type_raw": activity.type, }, ) raise if activity_type != ActivityType.CREATE_ISSUE: _default_logger.info( "tried to use external issue creator for an improper activity type", extra={ "activity_id": activity.id, "activity_type_raw": activity.type, "activity_type": activity_type, }, ) raise Exception(f"Activity type {activity_type} is incorrect") self._activity: Activity = activity def get_link(self) -> str: """ Returns the link to where the issue was created in the external provider. """ link = self._activity.data.get(self._URL_KEY, None) if not link: metrics.incr( self._NO_LINK_KEY_METRICS, sample_rate=1.0, ) _default_logger.info( "Activity does not have a url key, using fallback", extra={ "activity_id": self._activity.id, }, ) link = "" return link def get_provider(self) -> str: """ Returns the provider of the activity for where the issue was created. Returns the value in lowercase to provider consistent value. If key is not found, or value is empty, uses the fallback value. """ provider = self._activity.data.get(self._PROVIDER_KEY, None) if not provider: metrics.incr( self._NO_PROVIDER_KEY_METRICS, sample_rate=1.0, ) _default_logger.info( "Activity does not have a provider key, using fallback", extra={ "activity_id": self._activity.id, }, ) provider = self.DEFAULT_PROVIDER_FALLBACK_TEXT return provider.lower() def get_ticket_number(self) -> str: """ Returns the ticket number for the issue that was created on the external provider. """ ticket_number = self._activity.data.get(self._TICKET_KEY, None) if not ticket_number: metrics.incr( self._NO_LABEL_KEY_METRICS, sample_rate=1.0, ) _default_logger.info( "Activity does not have a label key, using fallback", extra={ "activity_id": self._activity.id, }, ) ticket_number = "" return ticket_number def get_formatted_provider_name(self) -> str: # Make sure to make the proper noun have correct capitalization # I.e. github -> GitHub, jira -> Jira # Special cases like github -> GitHub are implemented in their overriden classes return self.get_provider().capitalize()
_ExternalIssueCreatedActivity
python
pandas-dev__pandas
asv_bench/benchmarks/io/csv.py
{ "start": 2449, "end": 2728 }
class ____(BaseIO): fname = "__test__.csv" def setup(self): rng = date_range("1/1/2000", periods=1000) self.data = DataFrame(rng, index=rng) def time_frame_date_formatting(self): self.data.to_csv(self.fname, date_format="%Y%m%d")
ToCSVDatetime
python
mahmoud__boltons
boltons/jsonutils.py
{ "start": 4458, "end": 9864 }
class ____: """The ``JSONLIterator`` is used to iterate over JSON-encoded objects stored in the `JSON Lines format`_ (one object per line). Most notably it has the ability to efficiently read from the bottom of files, making it very effective for reading in simple append-only JSONL use cases. It also has the ability to start from anywhere in the file and ignore corrupted lines. Args: file_obj (file): An open file object. ignore_errors (bool): Whether to skip over lines that raise an error on deserialization (:func:`json.loads`). reverse (bool): Controls the direction of the iteration. Defaults to ``False``. If set to ``True`` and *rel_seek* is unset, seeks to the end of the file before iteration begins. rel_seek (float): Used to preseek the start position of iteration. Set to 0.0 for the start of the file, 1.0 for the end, and anything in between. .. _JSON Lines format: http://jsonlines.org/ """ def __init__(self, file_obj, ignore_errors=False, reverse=False, rel_seek=None): self._reverse = bool(reverse) self._file_obj = file_obj self.ignore_errors = ignore_errors if rel_seek is None: if reverse: rel_seek = 1.0 elif not -1.0 < rel_seek < 1.0: raise ValueError("'rel_seek' expected a float between" " -1.0 and 1.0, not %r" % rel_seek) elif rel_seek < 0: rel_seek = 1.0 - rel_seek self._rel_seek = rel_seek self._blocksize = 4096 if rel_seek is not None: self._init_rel_seek() if self._reverse: self._line_iter = reverse_iter_lines(self._file_obj, blocksize=self._blocksize, preseek=False) else: self._line_iter = iter(self._file_obj) @property def cur_byte_pos(self): "A property representing where in the file the iterator is reading." return self._file_obj.tell() def _align_to_newline(self): "Aligns the file object's position to the next newline." fo, bsize = self._file_obj, self._blocksize cur, total_read = '', 0 cur_pos = fo.tell() while '\n' not in cur: cur = fo.read(bsize) total_read += bsize try: newline_offset = cur.index('\n') + total_read - bsize except ValueError: raise # TODO: seek to end? fo.seek(cur_pos + newline_offset) def _init_rel_seek(self): "Sets the file object's position to the relative location set above." rs, fo = self._rel_seek, self._file_obj if rs == 0.0: fo.seek(0, os.SEEK_SET) else: fo.seek(0, os.SEEK_END) size = fo.tell() if rs == 1.0: self._cur_pos = size else: target = int(size * rs) fo.seek(target, os.SEEK_SET) self._align_to_newline() self._cur_pos = fo.tell() def __iter__(self): return self def next(self): """Yields one :class:`dict` loaded with :func:`json.loads`, advancing the file object by one line. Raises :exc:`StopIteration` upon reaching the end of the file (or beginning, if ``reverse`` was set to ``True``. 
""" while 1: line = next(self._line_iter).lstrip() if not line: continue try: obj = json.loads(line) except Exception: if not self.ignore_errors: raise continue return obj __next__ = next if __name__ == '__main__': def _main(): import sys if '-h' in sys.argv or '--help' in sys.argv: print('loads one or more JSON Line files for basic validation.') return verbose = False if '-v' in sys.argv or '--verbose' in sys.argv: verbose = True file_count, obj_count = 0, 0 filenames = sys.argv[1:] for filename in filenames: if filename in ('-h', '--help', '-v', '--verbose'): continue file_count += 1 with open(filename, 'rb') as file_obj: iterator = JSONLIterator(file_obj) cur_obj_count = 0 while 1: try: next(iterator) except ValueError: print('error reading object #%s around byte %s in %s' % (cur_obj_count + 1, iterator.cur_byte_pos, filename)) return except StopIteration: break obj_count += 1 cur_obj_count += 1 if verbose and obj_count and obj_count % 100 == 0: sys.stdout.write('.') if obj_count % 10000: sys.stdout.write('%s\n' % obj_count) if verbose: print('files checked: %s' % file_count) print('objects loaded: %s' % obj_count) return _main()
JSONLIterator
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/engine/result.py
{ "start": 73036, "end": 75582 }
class ____(Result[Unpack[_Ts]]): """A :class:`_engine.Result` that gets data from a Python iterator of :class:`_engine.Row` objects or similar row-like data. .. versionadded:: 1.4 """ _hard_closed = False _soft_closed = False def __init__( self, cursor_metadata: ResultMetaData, iterator: Iterator[_InterimSupportsScalarsRowType], raw: Optional[Result[Any]] = None, _source_supports_scalars: bool = False, ): self._metadata = cursor_metadata self.iterator = iterator self.raw = raw self._source_supports_scalars = _source_supports_scalars @property def closed(self) -> bool: """Return ``True`` if this :class:`_engine.IteratorResult` has been closed .. versionadded:: 1.4.43 """ return self._hard_closed def _soft_close(self, hard: bool = False, **kw: Any) -> None: if hard: self._hard_closed = True if self.raw is not None: self.raw._soft_close(hard=hard, **kw) self.iterator = iter([]) self._reset_memoizations() self._soft_closed = True def _raise_hard_closed(self) -> NoReturn: raise exc.ResourceClosedError("This result object is closed.") def _raw_row_iterator(self) -> Iterator[_RowData]: return self.iterator def _fetchiter_impl(self) -> Iterator[_InterimSupportsScalarsRowType]: if self._hard_closed: self._raise_hard_closed() return self.iterator def _fetchone_impl( self, hard_close: bool = False ) -> Optional[_InterimRowType[Row[Unpack[TupleAny]]]]: if self._hard_closed: self._raise_hard_closed() row = next(self.iterator, _NO_ROW) if row is _NO_ROW: self._soft_close(hard=hard_close) return None else: return row def _fetchall_impl( self, ) -> List[_InterimRowType[Row[Unpack[TupleAny]]]]: if self._hard_closed: self._raise_hard_closed() try: return list(self.iterator) finally: self._soft_close() def _fetchmany_impl( self, size: Optional[int] = None ) -> List[_InterimRowType[Row[Unpack[TupleAny]]]]: if self._hard_closed: self._raise_hard_closed() return list(itertools.islice(self.iterator, 0, size)) def null_result() -> IteratorResult[Any]: return IteratorResult(SimpleResultMetaData([]), iter([]))
IteratorResult
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/ext/indexable.py
{ "start": 7459, "end": 11761 }
class ____(hybrid_property[_T]): """A property generator. The generated property describes an object attribute that corresponds to an :class:`_types.Indexable` column. .. seealso:: :mod:`sqlalchemy.ext.indexable` """ _NO_DEFAULT_ARGUMENT = cast(_T, object()) def __init__( self, attr_name: str, index: Union[int, str], default: _T = _NO_DEFAULT_ARGUMENT, datatype: Optional[Callable[[], Any]] = None, mutable: bool = True, onebased: bool = True, ): """Create a new :class:`.index_property`. :param attr_name: An attribute name of an `Indexable` typed column, or other attribute that returns an indexable structure. :param index: The index to be used for getting and setting this value. This should be the Python-side index value for integers. :param default: A value which will be returned instead of `AttributeError` when there is not a value at given index. :param datatype: default datatype to use when the field is empty. By default, this is derived from the type of index used; a Python list for an integer index, or a Python dictionary for any other style of index. For a list, the list will be initialized to a list of None values that is at least ``index`` elements long. :param mutable: if False, writes and deletes to the attribute will be disallowed. :param onebased: assume the SQL representation of this value is one-based; that is, the first index in SQL is 1, not zero. """ if mutable: super().__init__(self.fget, self.fset, self.fdel, self.expr) else: super().__init__(self.fget, None, None, self.expr) self.attr_name = attr_name self.index = index self.default = default is_numeric = isinstance(index, int) onebased = is_numeric and onebased if datatype is not None: self.datatype = datatype else: if is_numeric: self.datatype = lambda: [None for x in range(index + 1)] # type: ignore[operator] # noqa: E501 else: self.datatype = dict self.onebased = onebased def _fget_default(self, err: Optional[BaseException] = None) -> _T: if self.default == self._NO_DEFAULT_ARGUMENT: raise AttributeError(self.attr_name) from err else: return self.default def fget(self, instance: Any, /) -> _T: attr_name = self.attr_name column_value = getattr(instance, attr_name) if column_value is None: return self._fget_default() try: value = column_value[self.index] except (KeyError, IndexError) as err: return self._fget_default(err) else: return value # type: ignore[no-any-return] def fset(self, instance: Any, value: _T) -> None: attr_name = self.attr_name column_value = getattr(instance, attr_name, None) if column_value is None: column_value = self.datatype() setattr(instance, attr_name, column_value) column_value[self.index] = value setattr(instance, attr_name, column_value) if attr_name in inspect(instance).mapper.attrs: flag_modified(instance, attr_name) def fdel(self, instance: Any) -> None: attr_name = self.attr_name column_value = getattr(instance, attr_name) if column_value is None: raise AttributeError(self.attr_name) try: del column_value[self.index] except KeyError as err: raise AttributeError(self.attr_name) from err else: setattr(instance, attr_name, column_value) flag_modified(instance, attr_name) def expr( self, model: Any ) -> Union[_HasClauseElement[_T], SQLColumnExpression[_T]]: column = getattr(model, self.attr_name) index = self.index if self.onebased: index += 1 # type: ignore[operator] return column[index] # type: ignore[no-any-return]
index_property
python
encode__django-rest-framework
tests/test_request.py
{ "start": 6746, "end": 7549 }
class ____(TestCase): def setUp(self): self.csrf_client = APIClient(enforce_csrf_checks=True) self.username = 'john' self.email = 'lennon@thebeatles.com' self.password = 'password' self.user = User.objects.create_user(self.username, self.email, self.password) def test_user_logged_in_authentication_has_POST_when_not_logged_in(self): """ Ensures request.POST exists after SessionAuthentication when user doesn't log in. """ content = {'example': 'example'} response = self.client.post('/', content) assert status.HTTP_200_OK == response.status_code response = self.csrf_client.post('/', content) assert status.HTTP_200_OK == response.status_code
TestContentParsingWithAuthentication
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/image_caption/base.py
{ "start": 235, "end": 3034 }
class ____(BaseReader): """ Image parser. Caption image using Blip. """ def __init__( self, parser_config: Optional[Dict] = None, keep_image: bool = False, prompt: Optional[str] = None, ): """Init params.""" if parser_config is None: """Init parser.""" try: import sentencepiece # noqa import torch from PIL import Image # noqa from transformers import BlipForConditionalGeneration, BlipProcessor except ImportError: raise ImportError( "Please install extra dependencies that are required for " "the ImageCaptionReader: " "`pip install torch transformers sentencepiece Pillow`" ) device = infer_torch_device() dtype = torch.float16 if torch.cuda.is_available() else torch.float32 processor = BlipProcessor.from_pretrained( "Salesforce/blip-image-captioning-large" ) model = BlipForConditionalGeneration.from_pretrained( "Salesforce/blip-image-captioning-large", torch_dtype=dtype ) parser_config = { "processor": processor, "model": model, "device": device, "dtype": dtype, } self._parser_config = parser_config self._keep_image = keep_image self._prompt = prompt def load_data( self, file: Path, extra_info: Optional[Dict] = None ) -> List[Document]: """Parse file.""" from llama_index.core.img_utils import img_2_b64 from PIL import Image # load document image image = Image.open(file) if image.mode != "RGB": image = image.convert("RGB") # Encode image into base64 string and keep in document image_str: Optional[str] = None if self._keep_image: image_str = img_2_b64(image) # Parse image into text model = self._parser_config["model"] processor = self._parser_config["processor"] device = self._parser_config["device"] dtype = self._parser_config["dtype"] model.to(device) # unconditional image captioning inputs = processor(image, self._prompt, return_tensors="pt").to(device, dtype) out = model.generate(**inputs) text_str = processor.decode(out[0], skip_special_tokens=True) return [ ImageDocument( text=text_str, image=image_str, image_path=str(file), metadata=extra_info or {}, ) ]
ImageCaptionReader
python
cython__cython
Cython/Debugger/Tests/test_libcython_in_gdb.py
{ "start": 16045, "end": 19077 }
class ____(DebugTestCase): def break_and_run_func(self, funcname): gdb.execute('cy break ' + funcname) gdb.execute('cy run') def test_inner(self): self.break_and_run_func('inner') self.assertEqual('', gdb.execute('cy locals', to_string=True)) # Allow the Cython-generated code to initialize the scope variable gdb.execute('cy step') self.assertEqual(str(self.read_var('a')), "'an object'") print_result = gdb.execute('cy print a', to_string=True).strip() self.assertEqual(print_result, "a = 'an object'") def test_outer(self): self.break_and_run_func('outer') self.assertEqual('', gdb.execute('cy locals', to_string=True)) # Initialize scope with 'a' uninitialized gdb.execute('cy step') self.assertEqual('', gdb.execute('cy locals', to_string=True)) # Initialize 'a' to 1 gdb.execute('cy step') print_result = gdb.execute('cy print a', to_string=True).strip() self.assertEqual(print_result, "a = 'an object'") _do_debug = os.environ.get('GDB_DEBUG') if _do_debug: _debug_file = open('/dev/tty', 'w') def _debug(*messages): if _do_debug: messages = itertools.chain([sys._getframe(1).f_code.co_name, ':'], messages) _debug_file.write(' '.join(str(msg) for msg in messages) + '\n') def run_unittest_in_module(modulename): # Check if the Python executable provides a symbol table. if not hasattr(gdb.selected_inferior().progspace, "symbol_file"): msg = ("Unable to run tests, Python was not compiled with " "debugging information. Either compile python with " "-g or get a debug build (configure with --with-pydebug).") warnings.warn(msg) os._exit(1) else: m = __import__(modulename, fromlist=['']) tests = inspect.getmembers(m, inspect.isclass) # test_support.run_unittest(tests) test_loader = unittest.TestLoader() suite = unittest.TestSuite( [test_loader.loadTestsFromTestCase(cls) for name, cls in tests]) result = unittest.TextTestRunner(verbosity=1).run(suite) return result.wasSuccessful() def runtests(): """ Run the libcython and libpython tests. Ensure that an appropriate status is returned to the parent test process. """ from Cython.Debugger.Tests import test_libpython_in_gdb success_libcython = run_unittest_in_module(__name__) success_libpython = run_unittest_in_module(test_libpython_in_gdb.__name__) if not success_libcython or not success_libpython: sys.exit(2) def main(version, trace_code=False): global inferior_python_version inferior_python_version = version if trace_code: tracer = trace.Trace(count=False, trace=True, outfile=sys.stderr, ignoredirs=[sys.prefix, sys.exec_prefix]) tracer.runfunc(runtests) else: runtests()
TestClosure
python
readthedocs__readthedocs.org
readthedocs/oauth/models.py
{ "start": 13619, "end": 14326 }
class ____(TimeStampedModel): remote_repository = models.ForeignKey( RemoteRepository, related_name="remote_repository_relations", on_delete=models.CASCADE, ) user = models.ForeignKey( User, related_name="remote_repository_relations", on_delete=models.CASCADE ) account = models.ForeignKey( SocialAccount, verbose_name=_("Connected account"), related_name="remote_repository_relations", on_delete=models.CASCADE, ) admin = models.BooleanField(_("Has admin privilege"), default=False) class Meta: unique_together = ( "remote_repository", "account", )
RemoteRepositoryRelation
python
django__django
tests/migrations/test_questioner.py
{ "start": 361, "end": 1248 }
class ____(SimpleTestCase): @override_settings( INSTALLED_APPS=["migrations"], MIGRATION_MODULES={"migrations": None}, ) def test_ask_initial_with_disabled_migrations(self): questioner = MigrationQuestioner() self.assertIs(False, questioner.ask_initial("migrations")) def test_ask_not_null_alteration(self): questioner = MigrationQuestioner() self.assertIsNone( questioner.ask_not_null_alteration("field_name", "model_name") ) @mock.patch("builtins.input", return_value="2") def test_ask_not_null_alteration_not_provided(self, mock): questioner = InteractiveMigrationQuestioner( prompt_output=OutputWrapper(StringIO()) ) question = questioner.ask_not_null_alteration("field_name", "model_name") self.assertEqual(question, NOT_PROVIDED)
QuestionerTests
python
huggingface__transformers
src/transformers/models/dab_detr/modeling_dab_detr.py
{ "start": 66081, "end": 74444 }
class ____(DabDetrPreTrainedModel): # When using clones, all layers > 0 will be clones, but layer 0 *is* required _tied_weights_keys = {"model.decoder.bbox_embed": "bbox_predictor"} def __init__(self, config: DabDetrConfig): super().__init__(config) self.config = config self.auxiliary_loss = config.auxiliary_loss self.query_dim = config.query_dim # DAB-DETR encoder-decoder model self.model = DabDetrModel(config) # Object detection heads self.class_embed = nn.Linear(config.hidden_size, config.num_labels) # Default bbox_embed_diff_each_layer is False self.bbox_predictor = DabDetrMLP(config.hidden_size, config.hidden_size, 4, 3) # Default iter_update is True self.model.decoder.bbox_embed = self.bbox_predictor # Initialize weights and apply final processing self.post_init() # taken from https://github.com/Atten4Vis/conditionalDETR/blob/master/models/dab_detr.py def _set_aux_loss(self, outputs_class, outputs_coord): return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] @auto_docstring def forward( self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[list[dict]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.FloatTensor], DabDetrObjectDetectionOutput]: r""" decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. labels (`list[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. 
Examples: ```python >>> from transformers import AutoImageProcessor, AutoModelForObjectDetection >>> from PIL import Image >>> import requests

 >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw)

 >>> image_processor = AutoImageProcessor.from_pretrained("IDEA-Research/dab-detr-resnet-50") >>> model = AutoModelForObjectDetection.from_pretrained("IDEA-Research/dab-detr-resnet-50")

 >>> inputs = image_processor(images=image, return_tensors="pt")

 >>> with torch.no_grad(): >>> outputs = model(**inputs)

 >>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax) >>> target_sizes = torch.tensor([(image.height, image.width)]) >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... ) Detected remote with confidence 0.833 at location [38.31, 72.1, 177.63, 118.45] Detected cat with confidence 0.831 at location [9.2, 51.38, 321.13, 469.0] Detected cat with confidence 0.804 at location [340.3, 16.85, 642.93, 370.95] Detected remote with confidence 0.683 at location [334.48, 73.49, 366.37, 190.01] Detected couch with confidence 0.535 at location [0.52, 1.19, 640.35, 475.1] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # First, send images through the DAB-DETR base model to obtain encoder + decoder outputs model_outputs = self.model( pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) reference_points = model_outputs.reference_points if return_dict else model_outputs[-1] intermediate_hidden_states = model_outputs.intermediate_hidden_states if return_dict else model_outputs[-2] # class logits + predicted bounding boxes logits = self.class_embed(intermediate_hidden_states[-1]) reference_before_sigmoid = inverse_sigmoid(reference_points) bbox_with_refinement = self.bbox_predictor(intermediate_hidden_states) bbox_with_refinement[..., : self.query_dim] += reference_before_sigmoid outputs_coord = bbox_with_refinement.sigmoid() pred_boxes = outputs_coord[-1] loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: outputs_class = None if self.config.auxiliary_loss: outputs_class = self.class_embed(intermediate_hidden_states) loss, loss_dict, auxiliary_outputs = self.loss_function( logits, labels, self.device, pred_boxes, self.config, outputs_class, outputs_coord ) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes) + auxiliary_outputs + model_outputs else: output = (logits, pred_boxes) + model_outputs # Since DabDetrObjectDetectionOutput doesn't have reference points + intermediate_hidden_states we cut down. 
return ((loss, loss_dict) + output) if loss is not None else output[:-2] return DabDetrObjectDetectionOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=model_outputs.last_hidden_state, decoder_hidden_states=model_outputs.decoder_hidden_states if output_hidden_states else None, decoder_attentions=model_outputs.decoder_attentions if output_attentions else None, cross_attentions=model_outputs.cross_attentions if output_attentions else None, encoder_last_hidden_state=model_outputs.encoder_last_hidden_state if output_hidden_states else None, encoder_hidden_states=model_outputs.encoder_hidden_states if output_hidden_states else None, encoder_attentions=model_outputs.encoder_attentions if output_attentions else None, ) __all__ = [ "DabDetrForObjectDetection", "DabDetrModel", "DabDetrPreTrainedModel", ]
DabDetrForObjectDetection
python
keon__algorithms
tests/test_strings.py
{ "start": 13974, "end": 14146 }
class ____(unittest.TestCase): def test_judge_circle(self): self.assertTrue(judge_circle("UDLRUD")) self.assertFalse(judge_circle("LLRU"))
TestJudgeCircle
python
sphinx-doc__sphinx
tests/roots/test-ext-autodoc/target/annotated.py
{ "start": 225, "end": 529 }
class ____: max_length: int whitelisted_words: list[str] def validate(value: str) -> str: return value #: Type alias for a validated string. ValidatedString = Annotated[str, FuncValidator(validate)] def hello(name: Annotated[str, 'attribute']) -> None: """docstring""" pass
MaxLen
python
django__django
tests/auth_tests/test_hashers.py
{ "start": 26029, "end": 30585 }
class ____(SimpleTestCase): def test_argon2(self): encoded = make_password("lètmein", hasher="argon2") self.assertTrue(is_password_usable(encoded)) self.assertTrue(encoded.startswith("argon2$argon2id$")) self.assertTrue(check_password("lètmein", encoded)) self.assertFalse(check_password("lètmeinz", encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "argon2") # Blank passwords blank_encoded = make_password("", hasher="argon2") self.assertTrue(blank_encoded.startswith("argon2$argon2id$")) self.assertTrue(is_password_usable(blank_encoded)) self.assertTrue(check_password("", blank_encoded)) self.assertFalse(check_password(" ", blank_encoded)) # Old hashes without version attribute encoded = ( "argon2$argon2i$m=8,t=1,p=1$c29tZXNhbHQ$gwQOXSNhxiOxPOA0+PY10P9QFO" "4NAYysnqRt1GSQLE55m+2GYDt9FEjPMHhP2Cuf0nOEXXMocVrsJAtNSsKyfg" ) self.assertTrue(check_password("secret", encoded)) self.assertFalse(check_password("wrong", encoded)) # Old hashes with version attribute. encoded = "argon2$argon2i$v=19$m=8,t=1,p=1$c2FsdHNhbHQ$YC9+jJCrQhs5R6db7LlN8Q" self.assertIs(check_password("secret", encoded), True) self.assertIs(check_password("wrong", encoded), False) # Salt entropy check. hasher = get_hasher("argon2") encoded_weak_salt = make_password("lètmein", "iodizedsalt", "argon2") encoded_strong_salt = make_password("lètmein", hasher.salt(), "argon2") self.assertIs(hasher.must_update(encoded_weak_salt), True) self.assertIs(hasher.must_update(encoded_strong_salt), False) def test_argon2_decode(self): salt = "abcdefghijk" encoded = make_password("lètmein", salt=salt, hasher="argon2") hasher = get_hasher("argon2") decoded = hasher.decode(encoded) self.assertEqual(decoded["memory_cost"], hasher.memory_cost) self.assertEqual(decoded["parallelism"], hasher.parallelism) self.assertEqual(decoded["salt"], salt) self.assertEqual(decoded["time_cost"], hasher.time_cost) def test_argon2_upgrade(self): self._test_argon2_upgrade("time_cost", "time cost", 1) self._test_argon2_upgrade("memory_cost", "memory cost", 64) self._test_argon2_upgrade("parallelism", "parallelism", 1) def test_argon2_version_upgrade(self): hasher = get_hasher("argon2") state = {"upgraded": False} encoded = ( "argon2$argon2id$v=19$m=102400,t=2,p=8$Y041dExhNkljRUUy$TMa6A8fPJh" "CAUXRhJXCXdw" ) def setter(password): state["upgraded"] = True old_m = hasher.memory_cost old_t = hasher.time_cost old_p = hasher.parallelism try: hasher.memory_cost = 8 hasher.time_cost = 1 hasher.parallelism = 1 self.assertTrue(check_password("secret", encoded, setter, "argon2")) self.assertTrue(state["upgraded"]) finally: hasher.memory_cost = old_m hasher.time_cost = old_t hasher.parallelism = old_p def _test_argon2_upgrade(self, attr, summary_key, new_value): hasher = get_hasher("argon2") self.assertEqual("argon2", hasher.algorithm) self.assertNotEqual(getattr(hasher, attr), new_value) old_value = getattr(hasher, attr) try: # Generate hash with attr set to 1 setattr(hasher, attr, new_value) encoded = make_password("letmein", hasher="argon2") attr_value = hasher.safe_summary(encoded)[summary_key] self.assertEqual(attr_value, new_value) state = {"upgraded": False} def setter(password): state["upgraded"] = True # No upgrade is triggered. self.assertTrue(check_password("letmein", encoded, setter, "argon2")) self.assertFalse(state["upgraded"]) # Revert to the old rounds count and ... setattr(hasher, attr, old_value) # ... check if the password would get updated to the new count. 
self.assertTrue(check_password("letmein", encoded, setter, "argon2")) self.assertTrue(state["upgraded"]) finally: setattr(hasher, attr, old_value) @skipUnless(scrypt, "scrypt not available") @override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS)
TestUtilsHashPassArgon2
python
run-llama__llama_index
llama-index-integrations/storage/index_store/llama-index-storage-index-store-postgres/llama_index/storage/index_store/postgres/base.py
{ "start": 177, "end": 2527 }
class ____(KVIndexStore): """ Postgres Index store. Args: postgres_kvstore (PostgresKVStore): Postgres key-value store namespace (str): namespace for the index store """ def __init__( self, postgres_kvstore: PostgresKVStore, namespace: Optional[str] = None, collection_suffix: Optional[str] = None, ) -> None: """Init a PostgresIndexStore.""" super().__init__( postgres_kvstore, namespace=namespace, collection_suffix=collection_suffix ) @classmethod def from_uri( cls, uri: str, namespace: Optional[str] = None, table_name: str = "indexstore", schema_name: str = "public", perform_setup: bool = True, debug: bool = False, use_jsonb: bool = False, collection_suffix: Optional[str] = None, ) -> "PostgresIndexStore": """Load a PostgresIndexStore from a PostgresURI.""" postgres_kvstore = PostgresKVStore.from_uri( uri=uri, table_name=table_name, schema_name=schema_name, perform_setup=perform_setup, debug=debug, use_jsonb=use_jsonb, ) return cls(postgres_kvstore, namespace, collection_suffix) @classmethod def from_params( cls, host: Optional[str] = None, port: Optional[str] = None, database: Optional[str] = None, user: Optional[str] = None, password: Optional[str] = None, namespace: Optional[str] = None, table_name: str = "indexstore", schema_name: str = "public", perform_setup: bool = True, debug: bool = False, use_jsonb: bool = False, collection_suffix: Optional[str] = None, ) -> "PostgresIndexStore": """Load a PostgresIndexStore from a Postgres host and port.""" postgres_kvstore = PostgresKVStore.from_params( host=host, port=port, database=database, user=user, password=password, table_name=table_name, schema_name=schema_name, perform_setup=perform_setup, debug=debug, use_jsonb=use_jsonb, ) return cls(postgres_kvstore, namespace, collection_suffix)
PostgresIndexStore
python
doocs__leetcode
solution/3100-3199/3144.Minimum Substring Partition of Equal Character Frequency/Solution2.py
{ "start": 0, "end": 556 }
class ____: def minimumSubstringsInPartition(self, s: str) -> int: @cache def dfs(i: int) -> int: if i >= n: return 0 cnt = defaultdict(int) m = 0 ans = n - i for j in range(i, n): cnt[s[j]] += 1 m = max(m, cnt[s[j]]) if j - i + 1 == m * len(cnt): ans = min(ans, 1 + dfs(j + 1)) return ans n = len(s) ans = dfs(0) dfs.cache_clear() return ans
Solution
python
PyCQA__pylint
tests/functional/u/unsupported/unsupported_version_for_final.py
{ "start": 794, "end": 984 }
class ____: @typing.final # [using-final-decorator-in-unsupported-version] def my_method(self): pass @mytyping.final # [using-final-decorator-in-unsupported-version]
MyClass3
python
pyca__cryptography
tests/hazmat/primitives/test_hashes.py
{ "start": 2538, "end": 2789 }
class ____: test_sha512 = generate_base_hash_test( hashes.SHA512(), digest_size=64, ) @pytest.mark.supported( only_if=lambda backend: backend.hash_supported(hashes.MD5()), skip_message="Does not support MD5", )
TestSHA512
python
django__django
tests/backends/test_ddl_references.py
{ "start": 6353, "end": 7528 }
class ____: def __init__( self, representation, referenced_tables, referenced_columns, referenced_indexes ): self.representation = representation self.referenced_tables = referenced_tables self.referenced_columns = referenced_columns self.referenced_indexes = referenced_indexes def references_table(self, table): return table in self.referenced_tables def references_column(self, table, column): return (table, column) in self.referenced_columns def references_index(self, table, index): return (table, index) in self.referenced_indexes def rename_table_references(self, old_table, new_table): if old_table in self.referenced_tables: self.referenced_tables.remove(old_table) self.referenced_tables.add(new_table) def rename_column_references(self, table, old_column, new_column): column = (table, old_column) if column in self.referenced_columns: self.referenced_columns.remove(column) self.referenced_columns.add((table, new_column)) def __str__(self): return self.representation
MockReference
python
pydantic__pydantic
tests/typechecking/with_config_decorator.py
{ "start": 234, "end": 386 }
class ____(TypedDict): pass @with_config(config=ConfigDict(str_to_lower=True)) # type: ignore[deprecated] # pyright: ignore[reportDeprecated]
Model2
python
coleifer__peewee
tests/sql.py
{ "start": 86634, "end": 91824 }
class ____(BaseTestCase): database = PostgresqlDatabase(None) def test_ignore(self): query = Person.insert(name='huey').on_conflict('ignore') self.assertSQL(query, ( 'INSERT INTO "person" ("name") VALUES (?) ' 'ON CONFLICT DO NOTHING'), ['huey']) def test_conflict_target_required(self): query = Person.insert(name='huey').on_conflict(preserve=(Person.dob,)) with self.assertRaisesCtx(ValueError): self.database.get_sql_context().parse(query) def test_conflict_resolution_required(self): query = Person.insert(name='huey').on_conflict(conflict_target='name') with self.assertRaisesCtx(ValueError): self.database.get_sql_context().parse(query) def test_conflict_update_excluded(self): KV = Table('kv', ('key', 'value', 'extra'), _database=self.database) query = (KV.insert(key='k1', value='v1', extra=1) .on_conflict(conflict_target=(KV.key, KV.value), update={KV.extra: EXCLUDED.extra + 2}, where=(EXCLUDED.extra < KV.extra))) self.assertSQL(query, ( 'INSERT INTO "kv" ("extra", "key", "value") VALUES (?, ?, ?) ' 'ON CONFLICT ("key", "value") DO UPDATE ' 'SET "extra" = (EXCLUDED."extra" + ?) ' 'WHERE (EXCLUDED."extra" < "kv"."extra")'), [1, 'k1', 'v1', 2]) def test_conflict_target_or_constraint(self): KV = Table('kv', ('key', 'value', 'extra'), _database=self.database) query = (KV.insert(key='k1', value='v1', extra='e1') .on_conflict(conflict_target=[KV.key, KV.value], preserve=[KV.extra])) self.assertSQL(query, ( 'INSERT INTO "kv" ("extra", "key", "value") VALUES (?, ?, ?) ' 'ON CONFLICT ("key", "value") DO UPDATE ' 'SET "extra" = EXCLUDED."extra"'), ['e1', 'k1', 'v1']) query = (KV.insert(key='k1', value='v1', extra='e1') .on_conflict(conflict_constraint='kv_key_value', preserve=[KV.extra])) self.assertSQL(query, ( 'INSERT INTO "kv" ("extra", "key", "value") VALUES (?, ?, ?) ' 'ON CONFLICT ON CONSTRAINT "kv_key_value" DO UPDATE ' 'SET "extra" = EXCLUDED."extra"'), ['e1', 'k1', 'v1']) query = KV.insert(key='k1', value='v1', extra='e1') self.assertRaises(ValueError, query.on_conflict, conflict_target=[KV.key, KV.value], conflict_constraint='kv_key_value') def test_update(self): dob = datetime.date(2010, 1, 1) query = (Person .insert(name='huey', dob=dob) .on_conflict( conflict_target=(Person.name,), preserve=(Person.dob,), update={Person.name: Person.name.concat('-x')})) self.assertSQL(query, ( 'INSERT INTO "person" ("dob", "name") VALUES (?, ?) ' 'ON CONFLICT ("name") DO ' 'UPDATE SET "dob" = EXCLUDED."dob", ' '"name" = ("person"."name" || ?)'), [dob, 'huey', '-x']) query = (Person .insert(name='huey', dob=dob) .on_conflict( conflict_target='name', preserve='dob')) self.assertSQL(query, ( 'INSERT INTO "person" ("dob", "name") VALUES (?, ?) ' 'ON CONFLICT ("name") DO ' 'UPDATE SET "dob" = EXCLUDED."dob"'), [dob, 'huey']) query = (Person .insert(name='huey') .on_conflict( conflict_target=Person.name, preserve=Person.dob, update={Person.name: Person.name.concat('-x')}, where=(Person.name != 'zaizee'))) self.assertSQL(query, ( 'INSERT INTO "person" ("name") VALUES (?) ' 'ON CONFLICT ("name") DO ' 'UPDATE SET "dob" = EXCLUDED."dob", ' '"name" = ("person"."name" || ?) 
' 'WHERE ("person"."name" != ?)'), ['huey', '-x', 'zaizee']) def test_conflict_target_partial_index(self): KVE = Table('kve', ('key', 'value', 'extra')) data = [('k1', 1, 2), ('k2', 2, 3)] columns = [KVE.key, KVE.value, KVE.extra] query = (KVE .insert(data, columns) .on_conflict( conflict_target=(KVE.key, KVE.value), conflict_where=(KVE.extra > 1), preserve=(KVE.extra,), where=(KVE.key != 'kx'))) self.assertSQL(query, ( 'INSERT INTO "kve" ("key", "value", "extra") ' 'VALUES (?, ?, ?), (?, ?, ?) ' 'ON CONFLICT ("key", "value") WHERE ("extra" > ?) ' 'DO UPDATE SET "extra" = EXCLUDED."extra" ' 'WHERE ("kve"."key" != ?)'), ['k1', 1, 2, 'k2', 2, 3, 1, 'kx']) #Person = Table('person', ['id', 'name', 'dob']) #Note = Table('note', ['id', 'person_id', 'content'])
TestOnConflictPostgresql
python
pypa__warehouse
warehouse/oidc/models/activestate.py
{ "start": 5677, "end": 7347 }
class ____(ActiveStatePublisherMixin, PendingOIDCPublisher): __tablename__ = "pending_activestate_oidc_publishers" __mapper_args__ = {"polymorphic_identity": "pending_activestate_oidc_publishers"} __table_args__ = ( # type: ignore[assignment] UniqueConstraint( "organization", "activestate_project_name", "actor_id", name="_pending_activestate_oidc_publisher_uc", ), ) id: Mapped[UUID] = mapped_column( PG_UUID(as_uuid=True), ForeignKey(PendingOIDCPublisher.id), primary_key=True ) def reify(self, session: Session) -> ActiveStatePublisher: """ Returns a `ActiveStatePublisher` for this `PendingActiveStatePublisher`, deleting the `PendingActiveStatePublisher` in the process. """ # Check if the publisher already exists. Return it if it does. maybe_publisher = ( session.query(ActiveStatePublisher) .filter( ActiveStatePublisher.organization == self.organization, ActiveStatePublisher.activestate_project_name == self.activestate_project_name, ActiveStatePublisher.actor_id == self.actor_id, ActiveStatePublisher.actor == self.actor, ) .one_or_none() ) publisher = maybe_publisher or ActiveStatePublisher( organization=self.organization, activestate_project_name=self.activestate_project_name, actor_id=self.actor_id, actor=self.actor, ) session.delete(self) return publisher
PendingActiveStatePublisher
python
Farama-Foundation__Gymnasium
gymnasium/envs/phys2d/pendulum.py
{ "start": 7549, "end": 8286 }
class ____(FunctionalJaxEnv, EzPickle): """Jax-based pendulum environment using the functional version as base.""" metadata = { "render_modes": ["rgb_array"], "render_fps": 30, "jax": True, "autoreset_mode": AutoresetMode.NEXT_STEP, } def __init__(self, render_mode: str | None = None, **kwargs: Any): """Constructor where the kwargs are passed to the base environment to modify the parameters.""" EzPickle.__init__(self, render_mode=render_mode, **kwargs) env = PendulumFunctional(**kwargs) env.transform(jax.jit) super().__init__( env, metadata=self.metadata, render_mode=render_mode, )
PendulumJaxEnv
python
openai__openai-python
src/openai/resources/realtime/client_secrets.py
{ "start": 7435, "end": 7700 }
class ____: def __init__(self, client_secrets: AsyncClientSecrets) -> None: self._client_secrets = client_secrets self.create = async_to_streamed_response_wrapper( client_secrets.create, )
AsyncClientSecretsWithStreamingResponse
python
huggingface__transformers
src/transformers/models/hubert/modular_hubert.py
{ "start": 3504, "end": 3567 }
class ____(Wav2Vec2FeatureEncoder): pass
HubertFeatureEncoder
python
realpython__materials
python-protocol/adder_v4.py
{ "start": 225, "end": 408 }
class ____: def add(self, x: float, y: float) -> float: return x + y def add(adder: Adder) -> None: print(adder.add(2, 3)) add(IntAdder()) add(FloatAdder())
FloatAdder
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_stateful.py
{ "start": 1704, "end": 2846 }
class ____(RuleBasedStateMachine): num = 0 @rule() def add_one(self): self.num += 1 @rule() def set_to_zero(self): self.num = 0 @rule(num=integers()) @precondition(lambda self: self.num != 0) def div_by_precondition_after(self, num): self.num = num / self.num @precondition(lambda self: self.num != 0) @rule(num=integers()) def div_by_precondition_before(self, num): self.num = num / self.num TestPrecondition = PreconditionMachine.TestCase TestPrecondition.settings = Settings(TestPrecondition.settings, max_examples=10) def test_picks_up_settings_at_first_use_of_testcase(): assert TestPrecondition.settings.max_examples == 10 def test_multiple_rules_same_func(): test_class = MultipleRulesSameFuncMachine.TestCase with capture_out() as o: test_class().runTest() output = o.getvalue() assert "rule1data" in output assert "rule2data" in output def test_can_get_test_case_off_machine_instance(): assert DepthMachine().TestCase is DepthMachine().TestCase assert DepthMachine().TestCase is not None
PreconditionMachine
python
kubernetes-client__python
kubernetes/client/models/v1_cel_device_selector.py
{ "start": 383, "end": 8590 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'expression': 'str' } attribute_map = { 'expression': 'expression' } def __init__(self, expression=None, local_vars_configuration=None): # noqa: E501 """V1CELDeviceSelector - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._expression = None self.discriminator = None self.expression = expression @property def expression(self): """Gets the expression of this V1CELDeviceSelector. # noqa: E501 Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort. The expression's input is an object named \"device\", which carries the following properties: - driver (string): the name of the driver which defines this device. - attributes (map[string]object): the device's attributes, grouped by prefix (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all of the attributes which were prefixed by \"dra.example.com\". - capacity (map[string]object): the device's capacities, grouped by prefix. - allowMultipleAllocations (bool): the allowMultipleAllocations property of the device (v1.34+ with the DRAConsumableCapacity feature enabled). Example: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields: device.driver device.attributes[\"dra.example.com\"].model device.attributes[\"ext.example.com\"].family device.capacity[\"dra.example.com\"].modules The device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers. The value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity. If an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort. A robust expression should check for the existence of attributes before referencing them. For ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example: cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool) The length of the expression must be smaller or equal to 10 Ki. The cost of evaluating it is also limited based on the estimated number of logical steps. # noqa: E501 :return: The expression of this V1CELDeviceSelector. # noqa: E501 :rtype: str """ return self._expression @expression.setter def expression(self, expression): """Sets the expression of this V1CELDeviceSelector. 
Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort. The expression's input is an object named \"device\", which carries the following properties: - driver (string): the name of the driver which defines this device. - attributes (map[string]object): the device's attributes, grouped by prefix (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all of the attributes which were prefixed by \"dra.example.com\". - capacity (map[string]object): the device's capacities, grouped by prefix. - allowMultipleAllocations (bool): the allowMultipleAllocations property of the device (v1.34+ with the DRAConsumableCapacity feature enabled). Example: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields: device.driver device.attributes[\"dra.example.com\"].model device.attributes[\"ext.example.com\"].family device.capacity[\"dra.example.com\"].modules The device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers. The value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity. If an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort. A robust expression should check for the existence of attributes before referencing them. For ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example: cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool) The length of the expression must be smaller or equal to 10 Ki. The cost of evaluating it is also limited based on the estimated number of logical steps. # noqa: E501 :param expression: The expression of this V1CELDeviceSelector. 
# noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and expression is None: # noqa: E501 raise ValueError("Invalid value for `expression`, must not be `None`") # noqa: E501 self._expression = expression def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1CELDeviceSelector): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1CELDeviceSelector): return True return self.to_dict() != other.to_dict()
V1CELDeviceSelector
python
dagster-io__dagster
python_modules/automation/automation/dagster_dev/commands/diff_summarizer.py
{ "start": 485, "end": 764 }
class ____: """Represents a structural change without full content.""" change_type: str # "added", "modified", "deleted" name: str # Function/class name file_path: str # Where the change occurred details: str # Brief description @dataclass
StructuralChange
python
django__django
tests/backends/mysql/test_features.py
{ "start": 227, "end": 2050 }
class ____(TestCase): def test_supports_transactions(self): """ All storage engines except MyISAM support transactions. """ del connection.features.supports_transactions with mock.patch( "django.db.connection.features._mysql_storage_engine", "InnoDB" ): self.assertTrue(connection.features.supports_transactions) del connection.features.supports_transactions with mock.patch( "django.db.connection.features._mysql_storage_engine", "MyISAM" ): self.assertFalse(connection.features.supports_transactions) del connection.features.supports_transactions def test_allows_auto_pk_0(self): with mock.MagicMock() as _connection: _connection.sql_mode = {"NO_AUTO_VALUE_ON_ZERO"} database_features = DatabaseFeatures(_connection) self.assertIs(database_features.allows_auto_pk_0, True) def test_allows_group_by_selected_pks(self): with mock.MagicMock() as _connection: _connection.mysql_is_mariadb = False database_features = DatabaseFeatures(_connection) self.assertIs(database_features.allows_group_by_selected_pks, True) with mock.MagicMock() as _connection: _connection.mysql_is_mariadb = False _connection.sql_mode = {} database_features = DatabaseFeatures(_connection) self.assertIs(database_features.allows_group_by_selected_pks, True) with mock.MagicMock() as _connection: _connection.mysql_is_mariadb = True _connection.sql_mode = {"ONLY_FULL_GROUP_BY"} database_features = DatabaseFeatures(_connection) self.assertIs(database_features.allows_group_by_selected_pks, False)
TestFeatures
python
ipython__ipython
IPython/core/guarded_eval.py
{ "start": 12486, "end": 13623 }
class ____: #: Local namespace locals: dict #: Global namespace globals: dict #: Evaluation policy identifier evaluation: EvaluationPolicyName = "forbidden" #: Whether the evaluation of code takes place inside of a subscript. #: Useful for evaluating ``:-1, 'col'`` in ``df[:-1, 'col']``. in_subscript: bool = False #: Auto import method auto_import: Callable[[list[str]], ModuleType] | None = None #: Overrides for evaluation policy policy_overrides: dict = field(default_factory=dict) #: Transient local namespace used to store mocks transient_locals: dict = field(default_factory=dict) #: Transients of class level class_transients: dict | None = None #: Instance variable name used in the method definition instance_arg_name: str | None = None #: Currently associated value #: Useful for adding items to _Duck on annotated assignment current_value: ast.AST | None = None def replace(self, /, **changes): """Return a new copy of the context, with specified changes""" return dataclasses.replace(self, **changes)
EvaluationContext
python
wandb__wandb
wandb/vendor/pygments/styles/pastie.py
{ "start": 441, "end": 2473 }
class ____(Style): """ Style similar to the pastie default style. """ default_style = '' styles = { Whitespace: '#bbbbbb', Comment: '#888888', Comment.Preproc: 'bold #cc0000', Comment.Special: 'bg:#fff0f0 bold #cc0000', String: 'bg:#fff0f0 #dd2200', String.Regex: 'bg:#fff0ff #008800', String.Other: 'bg:#f0fff0 #22bb22', String.Symbol: '#aa6600', String.Interpol: '#3333bb', String.Escape: '#0044dd', Operator.Word: '#008800', Keyword: 'bold #008800', Keyword.Pseudo: 'nobold', Keyword.Type: '#888888', Name.Class: 'bold #bb0066', Name.Exception: 'bold #bb0066', Name.Function: 'bold #0066bb', Name.Property: 'bold #336699', Name.Namespace: 'bold #bb0066', Name.Builtin: '#003388', Name.Variable: '#336699', Name.Variable.Class: '#336699', Name.Variable.Instance: '#3333bb', Name.Variable.Global: '#dd7700', Name.Constant: 'bold #003366', Name.Tag: 'bold #bb0066', Name.Attribute: '#336699', Name.Decorator: '#555555', Name.Label: 'italic #336699', Number: 'bold #0000DD', Generic.Heading: '#333', Generic.Subheading: '#666', Generic.Deleted: 'bg:#ffdddd #000000', Generic.Inserted: 'bg:#ddffdd #000000', Generic.Error: '#aa0000', Generic.Emph: 'italic', Generic.Strong: 'bold', Generic.Prompt: '#555555', Generic.Output: '#888888', Generic.Traceback: '#aa0000', Error: 'bg:#e3d2d2 #a61717' }
PastieStyle
python
scipy__scipy
scipy/interpolate/tests/test_interpnd.py
{ "start": 7289, "end": 8790 }
class ____: def test_smoketest(self): x = np.array([(0, 0), (0, 2), (1, 0), (1, 2), (0.25, 0.75), (0.6, 0.8)], dtype=float) tri = qhull.Delaunay(x) # Should be exact for linear functions, independent of triangulation funcs = [ (lambda x, y: 0*x + 1, (0, 0)), (lambda x, y: 0 + x, (1, 0)), (lambda x, y: -2 + y, (0, 1)), (lambda x, y: 3 + 3*x + 14.15*y, (3, 14.15)) ] for j, (func, grad) in enumerate(funcs): z = func(x[:,0], x[:,1]) dz = interpnd.estimate_gradients_2d_global(tri, z, tol=1e-6) assert dz.shape == (6, 2) xp_assert_close( dz, np.array(grad)[None, :] + 0*dz, rtol=1e-5, atol=1e-5, err_msg=f"item {j}" ) def test_regression_2359(self): # Check regression --- for certain point sets, gradient # estimation could end up in an infinite loop points = np.load(data_file('estimate_gradients_hang.npy')) values = np.random.rand(points.shape[0]) tri = qhull.Delaunay(points) # This should not hang with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "Gradient estimation did not converge", interpnd.GradientEstimationWarning ) interpnd.estimate_gradients_2d_global(tri, values, maxiter=1)
TestEstimateGradients2DGlobal
python
kamyu104__LeetCode-Solutions
Python/longest-common-subsequence-between-sorted-arrays.py
{ "start": 839, "end": 1125 }
class ____(object): def longestCommomSubsequence(self, arrays): """ :type arrays: List[List[int]] :rtype: List[int] """ return [num for num, cnt in collections.Counter(x for arr in arrays for x in arr).iteritems() if cnt == len(arrays)]
Solution2
python
Netflix__metaflow
metaflow/plugins/env_escape/override_decorators.py
{ "start": 0, "end": 301 }
class ____(object): def __init__(self, obj_mapping, wrapped_function): self._obj_mapping = obj_mapping self._wrapped = wrapped_function @property def obj_mapping(self): return self._obj_mapping @property def func(self): return self._wrapped
Override
python
ray-project__ray
python/ray/_private/thirdparty/pynvml/pynvml.py
{ "start": 256562, "end": 257033 }
class ____(_PrintableStructure): _fields_ = [ ('version', c_uint), ('perfProfilesMask', c_nvmlMask255_t), ('perfProfile', c_nvmlWorkloadPowerProfileInfo_v1_t * NVML_WORKLOAD_POWER_MAX_PROFILES) ] def __init__(self): super(c_nvmlWorkloadPowerProfileProfilesInfo_v1_t, self).__init__(version=nvmlWorkloadPowerProfileProfilesInfo_v1) nvmlWorkloadPowerProfileCurrentProfiles_v1 = 0x1000064
c_nvmlWorkloadPowerProfileProfilesInfo_v1_t
python
huggingface__transformers
src/transformers/models/bark/modeling_bark.py
{ "start": 12795, "end": 13692 }
class ____(PreTrainedModel): config: BarkConfig supports_gradient_checkpointing = False _supports_flash_attn = True @property def device(self) -> torch.device: """ `torch.device`: The device on which the module is (assuming that all the module parameters are on the same device). """ # if has _hf_hook, has been offloaded so the device has to be found in the hook if not hasattr(self, "_hf_hook"): return super().device for module in self.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return super().device # GPT2-like autoregressive model
BarkPreTrainedModel
python
chardet__chardet
chardet/hebrewprober.py
{ "start": 6664, "end": 14471 }
class ____(CharSetProber): SPACE = 0x20 # windows-1255 / ISO-8859-8 code points of interest FINAL_KAF = 0xEA NORMAL_KAF = 0xEB FINAL_MEM = 0xED NORMAL_MEM = 0xEE FINAL_NUN = 0xEF NORMAL_NUN = 0xF0 FINAL_PE = 0xF3 NORMAL_PE = 0xF4 FINAL_TSADI = 0xF5 NORMAL_TSADI = 0xF6 # Minimum Visual vs Logical final letter score difference. # If the difference is below this, don't rely solely on the final letter score # distance. MIN_FINAL_CHAR_DISTANCE = 5 # Minimum Visual vs Logical model score difference. # If the difference is below this, don't rely at all on the model score # distance. MIN_MODEL_DISTANCE = 0.01 VISUAL_HEBREW_NAME = "ISO-8859-8" LOGICAL_HEBREW_NAME = "windows-1255" def __init__(self) -> None: super().__init__() self._final_char_logical_score = 0 self._final_char_visual_score = 0 self._prev = self.SPACE self._before_prev = self.SPACE self._logical_prober: Optional[SingleByteCharSetProber] = None self._visual_prober: Optional[SingleByteCharSetProber] = None self.reset() def reset(self) -> None: self._final_char_logical_score = 0 self._final_char_visual_score = 0 # The two last characters seen in the previous buffer, # mPrev and mBeforePrev are initialized to space in order to simulate # a word delimiter at the beginning of the data self._prev = self.SPACE self._before_prev = self.SPACE # These probers are owned by the group prober. def set_model_probers( self, logical_prober: SingleByteCharSetProber, visual_prober: SingleByteCharSetProber, ) -> None: self._logical_prober = logical_prober self._visual_prober = visual_prober def is_final(self, c: int) -> bool: return c in [ self.FINAL_KAF, self.FINAL_MEM, self.FINAL_NUN, self.FINAL_PE, self.FINAL_TSADI, ] def is_non_final(self, c: int) -> bool: # The normal Tsadi is not a good Non-Final letter due to words like # 'lechotet' (to chat) containing an apostrophe after the tsadi. This # apostrophe is converted to a space in FilterWithoutEnglishLetters # causing the Non-Final tsadi to appear at an end of a word even # though this is not the case in the original text. # The letters Pe and Kaf rarely display a related behavior of not being # a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak' # for example legally end with a Non-Final Pe or Kaf. However, the # benefit of these letters as Non-Final letters outweighs the damage # since these words are quite rare. return c in [self.NORMAL_KAF, self.NORMAL_MEM, self.NORMAL_NUN, self.NORMAL_PE] def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState: # Final letter analysis for logical-visual decision. # Look for evidence that the received buffer is either logical Hebrew # or visual Hebrew. # The following cases are checked: # 1) A word longer than 1 letter, ending with a final letter. This is # an indication that the text is laid out "naturally" since the # final letter really appears at the end. +1 for logical score. # 2) A word longer than 1 letter, ending with a Non-Final letter. In # normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi, # should not end with the Non-Final form of that letter. Exceptions # to this rule are mentioned above in isNonFinal(). This is an # indication that the text is laid out backwards. +1 for visual # score # 3) A word longer than 1 letter, starting with a final letter. Final # letters should not appear at the beginning of a word. This is an # indication that the text is laid out backwards. +1 for visual # score. 
# # The visual score and logical score are accumulated throughout the # text and are finally checked against each other in GetCharSetName(). # No checking for final letters in the middle of words is done since # that case is not an indication for either Logical or Visual text. # # We automatically filter out all 7-bit characters (replace them with # spaces) so the word boundary detection works properly. [MAP] if self.state == ProbingState.NOT_ME: # Both model probers say it's not them. No reason to continue. return ProbingState.NOT_ME byte_str = self.filter_high_byte_only(byte_str) for cur in byte_str: if cur == self.SPACE: # We stand on a space - a word just ended if self._before_prev != self.SPACE: # next-to-last char was not a space so self._prev is not a # 1 letter word if self.is_final(self._prev): # case (1) [-2:not space][-1:final letter][cur:space] self._final_char_logical_score += 1 elif self.is_non_final(self._prev): # case (2) [-2:not space][-1:Non-Final letter][ # cur:space] self._final_char_visual_score += 1 else: # Not standing on a space if ( (self._before_prev == self.SPACE) and (self.is_final(self._prev)) and (cur != self.SPACE) ): # case (3) [-2:space][-1:final letter][cur:not space] self._final_char_visual_score += 1 self._before_prev = self._prev self._prev = cur # Forever detecting, till the end or until both model probers return # ProbingState.NOT_ME (handled above) return ProbingState.DETECTING @property def charset_name(self) -> str: assert self._logical_prober is not None assert self._visual_prober is not None # Make the decision: is it Logical or Visual? # If the final letter score distance is dominant enough, rely on it. finalsub = self._final_char_logical_score - self._final_char_visual_score if finalsub >= self.MIN_FINAL_CHAR_DISTANCE: return self.LOGICAL_HEBREW_NAME if finalsub <= -self.MIN_FINAL_CHAR_DISTANCE: return self.VISUAL_HEBREW_NAME # It's not dominant enough, try to rely on the model scores instead. modelsub = ( self._logical_prober.get_confidence() - self._visual_prober.get_confidence() ) if modelsub > self.MIN_MODEL_DISTANCE: return self.LOGICAL_HEBREW_NAME if modelsub < -self.MIN_MODEL_DISTANCE: return self.VISUAL_HEBREW_NAME # Still no good, back to final letter distance, maybe it'll save the # day. if finalsub < 0.0: return self.VISUAL_HEBREW_NAME # (finalsub > 0 - Logical) or (don't know what to do) default to # Logical. return self.LOGICAL_HEBREW_NAME @property def language(self) -> str: return "Hebrew" @property def state(self) -> ProbingState: assert self._logical_prober is not None assert self._visual_prober is not None # Remain active as long as any of the model probers are active. if (self._logical_prober.state == ProbingState.NOT_ME) and ( self._visual_prober.state == ProbingState.NOT_ME ): return ProbingState.NOT_ME return ProbingState.DETECTING
HebrewProber
python
huggingface__transformers
tests/models/timesfm/test_modeling_timesfm.py
{ "start": 5761, "end": 7445 }
class ____(unittest.TestCase): def test_inference(self): model = TimesFmModelForPrediction.from_pretrained("google/timesfm-2.0-500m-pytorch").to(torch_device) forecast_input = [ np.sin(np.linspace(0, 20, 100)), np.sin(np.linspace(0, 20, 200)), np.sin(np.linspace(0, 20, 400)), ] forecast_input_tensor = [torch.tensor(ts, dtype=torch.float32, device=torch_device) for ts in forecast_input] frequency_input = [0, 1, 2] with torch.no_grad(): output = model(past_values=forecast_input_tensor, freq=frequency_input) mean_predictions = output.mean_predictions self.assertEqual(mean_predictions.shape, torch.Size([3, model.config.horizon_length])) # fmt: off expected_slice = torch.tensor( [ 0.9813, 1.0086, 0.9985, 0.9432, 0.8505, 0.7203, 0.5596, 0.3788, 0.1796, -0.0264, -0.2307, -0.4255, -0.5978, -0.7642, -0.8772, -0.9670, -1.0110, -1.0162, -0.9848, -0.9151, -0.8016, -0.6511, -0.4707, -0.2842, -0.0787, 0.1260, 0.3293, 0.5104, 0.6818, 0.8155, 0.9172, 0.9843, 1.0101, 1.0025, 0.9529, 0.8588, 0.7384, 0.5885, 0.4022, 0.2099, -0.0035, -0.2104, -0.4146, -0.6033, -0.7661, -0.8818, -0.9725, -1.0191, -1.0190, -0.9874, -0.9137, -0.8069, -0.6683, -0.4939, -0.3086, -0.1106, 0.0846, 0.2927, 0.4832, 0.6612, 0.8031, 0.9051, 0.9772, 1.0064 ], device=torch_device) # fmt: on self.assertTrue(torch.allclose(mean_predictions[0, :64], expected_slice, atol=TOLERANCE))
TimesFmModelIntegrationTests
python
apache__airflow
airflow-core/src/airflow/api_fastapi/execution_api/versions/v2025_05_20.py
{ "start": 1005, "end": 2399 }
class ____(VersionChange): """Downgrade the upstream map indexes type for older clients.""" description = __doc__ instructions_to_migrate_to_previous_version = ( schema(TIRunContext).field("upstream_map_indexes").had(type=dict[str, int | None] | None), ) @convert_response_to_previous_version_for(TIRunContext) # type: ignore[arg-type] def downgrade_upstream_map_indexes(response: ResponseInfo = None) -> None: # type: ignore """ Downgrades the `upstream_map_indexes` field when converting to the previous version. Ensures that the field is only a dictionary of [str, int] (old format). """ resp = response.body.get("upstream_map_indexes") if isinstance(resp, dict): downgraded: dict[str, int | list | None] = {} for k, v in resp.items(): if isinstance(v, int): downgraded[k] = v elif isinstance(v, list) and v and all(isinstance(i, int) for i in v): downgraded[k] = v[0] else: # Keep values like None as is — the Task SDK expects them unchanged during mapped task expansion, # and modifying them can cause unexpected failures. downgraded[k] = None response.body["upstream_map_indexes"] = downgraded
DowngradeUpstreamMapIndexes
python
python-poetry__poetry
tests/console/test_application.py
{ "start": 993, "end": 1152 }
class ____(Command): name = "foo" description = "Foo Command" def handle(self) -> int: self.line("foo called") return 0
FooCommand
python
PrefectHQ__prefect
tests/server/orchestration/api/test_workers.py
{ "start": 37783, "end": 38533 }
class ____: @pytest.fixture(autouse=True) async def create_work_pools(self, client): for name in ["C", "B", "A"]: await client.post("/work_pools/", json=dict(name=name, type="test")) async def test_count_work_pools(self, client): response = await client.post("/work_pools/count") assert response.status_code == status.HTTP_200_OK, response.text assert response.json() == 3 async def test_count_work_pools_applies_filter(self, client): response = await client.post( "/work_pools/count", json={"work_pools": {"name": {"any_": ["A"]}}} ) assert response.status_code == status.HTTP_200_OK, response.text assert response.json() == 1
TestCountWorkPools
python
kubernetes-client__python
kubernetes/base/dynamic/resource.py
{ "start": 13332, "end": 14775 }
class ____(object): """ A parsed instance of an API resource attribute. It exists solely to ease interaction with API objects by allowing attributes to be accessed with '.' notation """ def __init__(self, params): self.__dict__.update(**params) def __repr__(self): return pformat(self.__dict__) def __eq__(self, other): return self.__dict__ == other.__dict__ def __getitem__(self, name): return self.__dict__.get(name) # Here resource.items will return items if available or resource.__dict__.items function if not # resource.get will call resource.__dict__.get after attempting resource.__dict__.get('get') def __getattr__(self, name): return self.__dict__.get(name, getattr(self.__dict__, name, None)) def __setattr__(self, name, value): self.__dict__[name] = value def __dir__(self): return dir(type(self)) + list(self.__dict__.keys()) def __iter__(self): for k, v in self.__dict__.items(): yield (k, v) def to_dict(self): return self.__serialize(self) def __serialize(self, field): if isinstance(field, ResourceField): return { k: self.__serialize(v) for k, v in field.__dict__.items() } if isinstance(field, (list, tuple)): return [self.__serialize(item) for item in field] return field
ResourceField
python
coleifer__peewee
peewee.py
{ "start": 189756, "end": 192949 }
class ____(MetaField): accessor_class = ManyToManyFieldAccessor def __init__(self, model, backref=None, through_model=None, on_delete=None, on_update=None, prevent_unsaved=True, _is_backref=False): if through_model is not None: if not (isinstance(through_model, DeferredThroughModel) or is_model(through_model)): raise TypeError('Unexpected value for through_model. Expected ' 'Model or DeferredThroughModel.') if not _is_backref and (on_delete is not None or on_update is not None): raise ValueError('Cannot specify on_delete or on_update when ' 'through_model is specified.') self.rel_model = model self.backref = backref self._through_model = through_model self._on_delete = on_delete self._on_update = on_update self._prevent_unsaved = prevent_unsaved self._is_backref = _is_backref def _get_descriptor(self): return ManyToManyFieldAccessor(self) def bind(self, model, name, set_attribute=True): if isinstance(self._through_model, DeferredThroughModel): self._through_model.set_field(model, self, name) return super(ManyToManyField, self).bind(model, name, set_attribute) if not self._is_backref: many_to_many_field = ManyToManyField( self.model, backref=name, through_model=self.through_model, on_delete=self._on_delete, on_update=self._on_update, _is_backref=True) self.backref = self.backref or model._meta.name + 's' self.rel_model._meta.add_field(self.backref, many_to_many_field) def get_models(self): return [model for _, model in sorted(( (self._is_backref, self.model), (not self._is_backref, self.rel_model)))] @property def through_model(self): if self._through_model is None: self._through_model = self._create_through_model() return self._through_model @through_model.setter def through_model(self, value): self._through_model = value def _create_through_model(self): lhs, rhs = self.get_models() tables = [model._meta.table_name for model in (lhs, rhs)] class Meta: database = self.model._meta.database schema = self.model._meta.schema table_name = '%s_%s_through' % tuple(tables) indexes = ( ((lhs._meta.name, rhs._meta.name), True),) params = {'on_delete': self._on_delete, 'on_update': self._on_update} attrs = { lhs._meta.name: ForeignKeyField(lhs, **params), rhs._meta.name: ForeignKeyField(rhs, **params), 'Meta': Meta} klass_name = '%s%sThrough' % (lhs.__name__, rhs.__name__) return type(klass_name, (Model,), attrs) def get_through_model(self): # XXX: Deprecated. Just use the "through_model" property. return self.through_model
ManyToManyField
python
ApeWorX__ape
tests/functional/test_test.py
{ "start": 2270, "end": 5969 }
class ____: @pytest.mark.parametrize( "cli_value,config_value", [(True, False), (False, True), (False, False)] ) def test_show_internal(self, mocker, cli_value, config_value): pytest_cfg = mocker.MagicMock() ape_test_cfg = mocker.MagicMock() wrapper = ConfigWrapper(pytest_cfg) wrapper.__dict__["ape_test_config"] = ape_test_cfg expected = cli_value or config_value # True if there is a True pytest_cfg.getoption.return_value = cli_value ape_test_cfg.show_internal = config_value assert wrapper.show_internal is expected def test_verbosity(self, mocker): """ Show it returns the same as pytest_config's. """ pytest_cfg = mocker.MagicMock() pytest_cfg.option.verbose = False wrapper = ConfigWrapper(pytest_cfg) assert wrapper.verbosity is False def test_verbosity_when_no_capture(self, mocker): """ Shows we enable verbose output when no-capture is set. """ def get_opt(name: str): return "no" if name == "capture" else None pytest_cfg = mocker.MagicMock() pytest_cfg.option.verbose = False # Start off as False pytest_cfg.getoption.side_effect = get_opt wrapper = ConfigWrapper(pytest_cfg) assert wrapper.verbosity is True @pytest.mark.parametrize("flag", (True, None)) def test_isolation_command_line(self, mocker, flag): pytest_cfg = mocker.MagicMock() def get_opt(name: str): if name == "disable_isolation": return flag pytest_cfg.getoption.side_effect = get_opt wrapper = ConfigWrapper(pytest_cfg) if flag: assert not wrapper.isolation for scope in Scope: assert not wrapper.get_isolation(scope) else: assert wrapper.isolation def test_isolation_config(self, mocker): pytest_cfg = mocker.MagicMock() pytest_cfg.getoption.return_value = None ape_test_cfg = ApeTestConfig() wrapper = ConfigWrapper(pytest_cfg) wrapper.__dict__["ape_test_config"] = ape_test_cfg # Show can configure isolation as True. ape_test_cfg.isolation = True for scope in Scope: assert wrapper.get_isolation(scope) # Show can configure isolation as False. ape_test_cfg.isolation = False for scope in Scope: assert not wrapper.get_isolation(scope) # Show can configure individual scopes. ape_test_cfg.isolation = IsolationConfig( enable_session=True, enable_package=False, enable_function=True, ) assert wrapper.get_isolation(Scope.SESSION) assert not wrapper.get_isolation(Scope.PACKAGE) # default assert wrapper.get_isolation(Scope.MODULE) # default assert wrapper.get_isolation(Scope.FUNCTION) def test_connect_to_mainnet_by_default(mocker): """ Tests the condition where mainnet is configured as the default network and no --network option is passed. It should avoid running the tests to be safe. """ cfg = mocker.MagicMock() cfg.network = "ethereum:mainnet:node" runner = PytestApeRunner( cfg, mocker.MagicMock(), mocker.MagicMock(), mocker.MagicMock(), mocker.MagicMock() ) expected = ( "Default network is mainnet; unable to run tests on mainnet. " "Please specify the network using the `--network` flag or " "configure a different default network." ) with pytest.raises(ConfigError, match=expected): runner._connect()
TestConfigWrapper
python
pytorch__pytorch
test/torch_np/test_ndarray_methods.py
{ "start": 6205, "end": 12290 }
class ____(TestCase): sizes = [ (), (3,), (3, 2), (2, 3), (3, 3), (2, 3, 4), (4, 3, 2), (1, 2, 3, 4), (2, 3, 4, 1), (3, 4, 1, 2), (4, 1, 2, 3), (64,), (128,), (256,), ] @skipif(numpy.__version__ < "1.22", reason="NP_VER: fails on NumPy 1.21.x") @parametrize( "size, axis", list( itertools.chain( *[ [ (size, axis) for axis in list(range(-len(size), len(size))) + [None] ] for size in sizes ] ) ), ) @parametrize("method", [np.argmax, np.argmin]) def test_np_argmin_argmax_keepdims(self, size, axis, method): # arr = np.random.normal(size=size) arr = np.empty(shape=size) # contiguous arrays if axis is None: new_shape = [1 for _ in range(len(size))] else: new_shape = list(size) new_shape[axis] = 1 new_shape = tuple(new_shape) _res_orig = method(arr, axis=axis) res_orig = _res_orig.reshape(new_shape) res = method(arr, axis=axis, keepdims=True) assert_equal(res, res_orig) assert res.shape == new_shape outarray = np.empty(res.shape, dtype=res.dtype) res1 = method(arr, axis=axis, out=outarray, keepdims=True) assert res1 is outarray assert_equal(res, outarray) if len(size) > 0: wrong_shape = list(new_shape) if axis is not None: wrong_shape[axis] = 2 else: wrong_shape[0] = 2 wrong_outarray = np.empty(wrong_shape, dtype=res.dtype) with pytest.raises(ValueError): method(arr.T, axis=axis, out=wrong_outarray, keepdims=True) # non-contiguous arrays if axis is None: new_shape = [1 for _ in range(len(size))] else: new_shape = list(size)[::-1] new_shape[axis] = 1 new_shape = tuple(new_shape) _res_orig = method(arr.T, axis=axis) res_orig = _res_orig.reshape(new_shape) res = method(arr.T, axis=axis, keepdims=True) assert_equal(res, res_orig) assert res.shape == new_shape outarray = np.empty(new_shape[::-1], dtype=res.dtype) outarray = outarray.T res1 = method(arr.T, axis=axis, out=outarray, keepdims=True) assert res1 is outarray assert_equal(res, outarray) if len(size) > 0: # one dimension lesser for non-zero sized # array should raise an error with pytest.raises(ValueError): method(arr[0], axis=axis, out=outarray, keepdims=True) if len(size) > 0: wrong_shape = list(new_shape) if axis is not None: wrong_shape[axis] = 2 else: wrong_shape[0] = 2 wrong_outarray = np.empty(wrong_shape, dtype=res.dtype) with pytest.raises(ValueError): method(arr.T, axis=axis, out=wrong_outarray, keepdims=True) @skipif(True, reason="XXX: need ndarray.chooses") @parametrize("method", ["max", "min"]) def test_all(self, method): # a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) a = np.arange(4 * 5 * 6 * 7 * 8).reshape((4, 5, 6, 7, 8)) arg_method = getattr(a, "arg" + method) val_method = getattr(a, method) for i in range(a.ndim): a_maxmin = val_method(i) aarg_maxmin = arg_method(i) axes = list(range(a.ndim)) axes.remove(i) assert np.all(a_maxmin == aarg_maxmin.choose(*a.transpose(i, *axes))) @parametrize("method", ["argmax", "argmin"]) def test_output_shape(self, method): # see also gh-616 a = np.ones((10, 5)) arg_method = getattr(a, method) # Check some simple shape mismatches out = np.ones(11, dtype=np.int_) with assert_raises(ValueError): arg_method(-1, out=out) out = np.ones((2, 5), dtype=np.int_) with assert_raises(ValueError): arg_method(-1, out=out) # these could be relaxed possibly (used to allow even the previous) out = np.ones((1, 10), dtype=np.int_) with assert_raises(ValueError): arg_method(-1, out=out) out = np.ones(10, dtype=np.int_) arg_method(-1, out=out) assert_equal(out, arg_method(-1)) @parametrize("ndim", [0, 1]) @parametrize("method", ["argmax", "argmin"]) def test_ret_is_out(self, ndim, method): a = np.ones((4,) + (256,) 
* ndim) arg_method = getattr(a, method) out = np.empty((256,) * ndim, dtype=np.intp) ret = arg_method(axis=0, out=out) assert ret is out @parametrize( "arr_method, np_method", [("argmax", np.argmax), ("argmin", np.argmin)] ) def test_np_vs_ndarray(self, arr_method, np_method): # make sure both ndarray.argmax/argmin and # numpy.argmax/argmin support out/axis args # a = np.random.normal(size=(2, 3)) a = np.arange(6).reshape((2, 3)) arg_method = getattr(a, arr_method) # check keyword args out1 = np.zeros(3, dtype=int) out2 = np.zeros(3, dtype=int) assert_equal(arg_method(out=out1, axis=0), np_method(a, out=out2, axis=0)) assert_equal(out1, out2) @parametrize( "arr_method, np_method", [("argmax", np.argmax), ("argmin", np.argmin)] ) def test_np_vs_ndarray_positional(self, arr_method, np_method): a = np.arange(6).reshape((2, 3)) arg_method = getattr(a, arr_method) # check positional args out1 = np.zeros(2, dtype=int) out2 = np.zeros(2, dtype=int) assert_equal(arg_method(1, out1), np_method(a, 1, out2)) assert_equal(out1, out2) @instantiate_parametrized_tests
TestArgmaxArgminCommon
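A minimal sketch (assuming plain NumPy >= 1.22, outside the test harness above) of the keepdims behaviour these tests exercise:

import numpy as np

a = np.arange(12).reshape(3, 4)
idx = np.argmax(a, axis=1, keepdims=True)   # shape (3, 1) instead of (3,)
# keepdims makes the index array broadcast against the input:
print(np.take_along_axis(a, idx, axis=1))   # per-row maxima, shape (3, 1)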
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 428295, "end": 428591 }
class ____(VegaLiteSchema): """ FieldRange schema wrapper. Parameters ---------- field : str """ _schema = {"$ref": "#/definitions/FieldRange"} def __init__(self, field: Optional[str] = Undefined, **kwds): super().__init__(field=field, **kwds)
FieldRange
python
numpy__numpy
numpy/lib/tests/test_shape_base.py
{ "start": 2876, "end": 4305 }
class ____:
    def test_replace_max(self):
        a_base = np.array([[10, 30, 20], [60, 40, 50]])

        for axis in list(range(a_base.ndim)) + [None]:
            # we mutate this in the loop
            a = a_base.copy()

            # replace the max with a small value
            i_max = _add_keepdims(np.argmax)(a, axis=axis)
            put_along_axis(a, i_max, -99, axis=axis)

            # find the new minimum, which should be where the old max was
            i_min = _add_keepdims(np.argmin)(a, axis=axis)

            assert_equal(i_min, i_max)

    def test_broadcast(self):
        """Test that non-indexing dimensions are broadcast in both directions"""
        a = np.ones((3, 4, 1))
        ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
        put_along_axis(a, ai, 20, axis=1)
        assert_equal(take_along_axis(a, ai, axis=1), 20)

    def test_invalid(self):
        """Test invalid inputs"""
        a_base = np.array([[10, 30, 20], [60, 40, 50]])
        indices = np.array([[0], [1]])
        values = np.array([[2], [1]])

        # sanity check
        a = a_base.copy()
        put_along_axis(a, indices, values, axis=0)
        assert np.all(a == [[2, 2, 2], [1, 1, 1]])

        # invalid indices
        a = a_base.copy()
        with assert_raises(ValueError) as exc:
            put_along_axis(a, indices, values, axis=None)
        assert "single dimension" in str(exc.exception)
TestPutAlongAxis
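A hedged standalone sketch of the put_along_axis/take_along_axis round trip relied on above (plain NumPy; the index array must keep a matching number of dimensions):

import numpy as np

a = np.array([[10, 30, 20], [60, 40, 50]])
i_max = np.argmax(a, axis=1, keepdims=True)   # (2, 1) index array
np.put_along_axis(a, i_max, -99, axis=1)      # overwrite each row's max in place
print(a)                                      # [[ 10 -99  20] [-99  40  50]]
print(np.take_along_axis(a, i_max, axis=1))   # [[-99] [-99]]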
python
readthedocs__readthedocs.org
readthedocs/projects/migrations/0136_addons_customscript_notnull.py
{ "start": 150, "end": 700 }
class ____(migrations.Migration): safe = Safe.after_deploy() dependencies = [ ("projects", "0135_addons_load_when_embedded_notnull"), ] operations = [ migrations.AlterField( model_name="addonsconfig", name="customscript_enabled", field=models.BooleanField(default=False), ), migrations.AlterField( model_name="historicaladdonsconfig", name="customscript_enabled", field=models.BooleanField(default=False), ), ]
Migration
python
huggingface__transformers
tests/models/video_llama_3/test_modeling_video_llama_3.py
{ "start": 11510, "end": 13907 }
class ____: def __init__( self, parent, batch_size=12, patch_size=2, num_channels=3, image_size=14, is_training=True, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.patch_size = patch_size self.num_channels = num_channels self.image_size = image_size self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope self.seq_length = (self.image_size // self.patch_size) ** 2 def get_config(self): return VideoLlama3VisionConfig( patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def prepare_config_and_inputs(self): config = self.get_config() patch_size = config.patch_size pixel_values = floats_tensor( [ self.batch_size * (self.image_size**2) // (patch_size**2), self.num_channels * (patch_size**2), ] ) return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs num_patches = self.image_size // config.patch_size inputs_dict = { "pixel_values": pixel_values, "grid_thw": torch.tensor([[1, num_patches, num_patches]] * self.batch_size, device=torch_device), "merge_sizes": torch.tensor([1] * self.batch_size, device=torch_device), } return config, inputs_dict @require_torch
VideoLlama3VisionModelTester
python
bokeh__bokeh
src/bokeh/document/events.py
{ "start": 27702, "end": 28635 }
class ____(DocumentChangedEvent): ''' A concrete event representing a change to add a new callback (e.g. periodic, timeout, or "next tick") to a Document. ''' def __init__(self, document: Document, callback: SessionCallback) -> None: ''' Args: document (Document) : A Bokeh document that is to be updated. callback (SessionCallback) : The callback to add ''' super().__init__(document) self.callback = callback def dispatch(self, receiver: Any) -> None: ''' Dispatch handling of this event to a receiver. This method will invoke ``receiver._session_callback_added`` if it exists. ''' super().dispatch(receiver) if hasattr(receiver, '_session_callback_added'): cast(SessionCallbackAddedMixin, receiver)._session_callback_added(self)
SessionCallbackAdded
python
eventlet__eventlet
tests/greendns_test.py
{ "start": 30730, "end": 31554 }
class ____(tests.LimitedTestCase): def test_isv4(self): assert greendns.is_ipv4_addr('1.2.3.4') def test_isv4_false(self): assert not greendns.is_ipv4_addr('260.0.0.0') def test_isv6(self): assert greendns.is_ipv6_addr('dead:beef::1') def test_isv6_invalid(self): assert not greendns.is_ipv6_addr('foobar::1') def test_v4(self): assert greendns.is_ip_addr('1.2.3.4') def test_v4_illegal(self): assert not greendns.is_ip_addr('300.0.0.1') def test_v6_addr(self): assert greendns.is_ip_addr('::1') def test_isv4_none(self): assert not greendns.is_ipv4_addr(None) def test_isv6_none(self): assert not greendns.is_ipv6_addr(None) def test_none(self): assert not greendns.is_ip_addr(None)
TestIsIpAddr
python
ray-project__ray
python/ray/serve/tests/test_config_files/multi_fastapi.py
{ "start": 278, "end": 528 }
class ____: def __init__(self, submodel: DeploymentHandle): self.submodel = submodel @app1.get("/{a}") async def func(self, a: int): return await self.submodel.add.remote(a) invalid_model = Model.bind(SubModel.bind())
Model
python
huggingface__transformers
src/transformers/models/granitemoehybrid/modeling_granitemoehybrid.py
{ "start": 52114, "end": 52859 }
class ____(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ GraniteMoeHybridRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
GraniteMoeHybridRMSNorm
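A small sketch of the computation the forward pass above performs, in plain PyTorch; the tensor sizes here are illustrative only:

import torch

hidden_size, eps = 4, 1e-6
x = torch.randn(2, 3, hidden_size)
weight = torch.ones(hidden_size)

variance = x.pow(2).mean(-1, keepdim=True)   # mean of squares; no mean-centering
normed = x * torch.rsqrt(variance + eps)     # divide by the root-mean-square
out = weight * normed
print(out.pow(2).mean(-1))                   # ~1.0 everywhere: unit RMS per vector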
python
aimacode__aima-python
making_simple_decision4e.py
{ "start": 1256, "end": 3795 }
class ____(Agent):
    """A simple information gathering agent. The agent works by repeatedly selecting
    the observation with the highest information value, until the cost of the next
    observation is greater than its expected benefit. [Figure 16.9]"""

    def __init__(self, decnet, infer, initial_evidence=None):
        """decnet: a decision network
        infer: the preferred method to carry out inference on the given decision network
        initial_evidence: initial evidence"""
        super().__init__()
        self.decnet = decnet
        self.infer = infer
        self.observation = initial_evidence or []
        self.variables = self.decnet.nodes

    def integrate_percept(self, percept):
        """Integrate the given percept into the decision network"""
        raise NotImplementedError

    def execute(self, percept):
        """Execute the information gathering algorithm"""
        self.observation = self.integrate_percept(percept)
        vpis = self.vpi_cost_ratio(self.variables)
        # pick the variable with the highest VPI-to-cost ratio; this must be an
        # argmax (`max(vpis)` alone would yield the ratio itself, not an index)
        j = vpis.index(max(vpis))
        variable = self.variables[j]
        if self.vpi(variable) > self.cost(variable):
            return self.request(variable)
        return self.decnet.best_action()

    def request(self, variable):
        """Return the value of the given random variable as the next percept"""
        raise NotImplementedError

    def cost(self, var):
        """Return the cost of obtaining evidence through tests, consultants or questions"""
        raise NotImplementedError

    def vpi_cost_ratio(self, variables):
        """Return the VPI to cost ratio for the given variables"""
        v_by_c = []
        for var in variables:
            v_by_c.append(self.vpi(var) / self.cost(var))
        return v_by_c

    def vpi(self, variable):
        """Return VPI for a given variable"""
        vpi = 0.0
        prob_dist = self.infer(variable, self.observation, self.decnet).prob
        for item, _ in prob_dist.items():
            post_prob = prob_dist[item]
            new_observation = list(self.observation)
            new_observation.append(item)
            expected_utility = self.decnet.get_expected_utility(variable, new_observation)
            vpi += post_prob * expected_utility
        vpi -= self.decnet.get_expected_utility(variable, self.observation)
        return vpi


# _________________________________________________________________________
# chapter 25 Robotics

# TODO: Implement continuous map for MonteCarlo similar to Fig25.10 from the book
InformationGatheringAgent
python
huggingface__transformers
src/transformers/models/vit/modeling_vit.py
{ "start": 8272, "end": 10575 }
class ____(nn.Module): def __init__(self, config: ViTConfig): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.config = config self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.dropout_prob = config.attention_probs_dropout_prob self.scaling = self.attention_head_size**-0.5 self.is_causal = False self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: batch_size = hidden_states.shape[0] new_shape = batch_size, -1, self.num_attention_heads, self.attention_head_size key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2) value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2) query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] context_layer, attention_probs = attention_interface( self, query_layer, key_layer, value_layer, None, is_causal=self.is_causal, scaling=self.scaling, dropout=0.0 if not self.training else self.dropout_prob, ) new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.reshape(new_context_layer_shape) return context_layer, attention_probs
ViTSelfAttention
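A rough sketch of the head-splitting reshape and eager scaled-dot-product attention used above, with made-up sizes and no projections, masking, or dropout:

import torch

batch, seq, hidden, heads = 2, 5, 8, 2
head_dim = hidden // heads
x = torch.randn(batch, seq, hidden)

# split hidden into (heads, head_dim) and move the head axis forward
q = x.view(batch, seq, heads, head_dim).transpose(1, 2)   # (B, H, S, D)
k = x.view(batch, seq, heads, head_dim).transpose(1, 2)
v = x.view(batch, seq, heads, head_dim).transpose(1, 2)

scores = (q @ k.transpose(-2, -1)) * head_dim**-0.5       # (B, H, S, S)
probs = scores.softmax(dim=-1)
ctx = (probs @ v).transpose(1, 2).reshape(batch, seq, hidden)
print(ctx.shape)                                          # torch.Size([2, 5, 8])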
python
django__django
tests/admin_views/admin.py
{ "start": 8640, "end": 8975 }
class ____(BaseModelFormSet): def clean(self): for person_dict in self.cleaned_data: person = person_dict.get("id") alive = person_dict.get("alive") if person and alive and person.name == "Grace Hopper": raise ValidationError("Grace is not a Zombie")
BasePersonModelFormSet
python
encode__httpx
httpx/_client.py
{ "start": 2330, "end": 3418 }
class ____: """ For some parameters such as `auth=...` and `timeout=...` we need to be able to indicate the default "unset" state, in a way that is distinctly different to using `None`. The default "unset" state indicates that whatever default is set on the client should be used. This is different to setting `None`, which explicitly disables the parameter, possibly overriding a client default. For example we use `timeout=USE_CLIENT_DEFAULT` in the `request()` signature. Omitting the `timeout` parameter will send a request using whatever default timeout has been configured on the client. Including `timeout=None` will ensure no timeout is used. Note that user code shouldn't need to use the `USE_CLIENT_DEFAULT` constant, but it is used internally when a parameter is not included. """ USE_CLIENT_DEFAULT = UseClientDefault() logger = logging.getLogger("httpx") USER_AGENT = f"python-httpx/{__version__}" ACCEPT_ENCODING = ", ".join( [key for key in SUPPORTED_DECODERS.keys() if key != "identity"] )
UseClientDefault
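A minimal sketch of the unset-vs-None sentinel pattern the docstring describes; `request` and `DEFAULT_TIMEOUT` here are hypothetical names for illustration, not httpx API:

class _Unset:
    pass

UNSET = _Unset()          # hypothetical stand-in for USE_CLIENT_DEFAULT
DEFAULT_TIMEOUT = 5.0

def request(timeout=UNSET):
    if timeout is UNSET:  # argument omitted: fall back to the configured default
        return DEFAULT_TIMEOUT
    return timeout        # None passes through: timeout explicitly disabled

print(request())       # 5.0
print(request(None))   # None
print(request(1.0))    # 1.0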
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/property15.py
{ "start": 122, "end": 479 }
class ____(Generic[T]): def __init__(self, bar: T): self._bar = bar @property def prop1(self) -> T: return self._bar def method1(self) -> T: reveal_type(self._bar, expected_text="T@ClassA") return self._bar a = ClassA[int](3) # This should work fine because a.bar should be an int a.prop1.bit_length()
ClassA
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/triggers/dms.py
{ "start": 5719, "end": 7208 }
class ____(AwsBaseWaiterTrigger): """ Trigger when an AWS DMS Serverless replication is stopped. :param replication_config_arn: The ARN of the replication config. :param waiter_delay: The amount of time in seconds to wait between attempts. :param waiter_max_attempts: The maximum number of attempts to be made. :param aws_conn_id: The Airflow connection used for AWS credentials. """ def __init__( self, replication_config_arn: str, waiter_delay: int = 30, waiter_max_attempts: int = 60, aws_conn_id: str | None = "aws_default", ) -> None: super().__init__( serialized_fields={"replication_config_arn": replication_config_arn}, waiter_name="replication_stopped", waiter_delay=waiter_delay, waiter_args={"Filters": [{"Name": "replication-config-arn", "Values": [replication_config_arn]}]}, waiter_max_attempts=waiter_max_attempts, failure_message="Replication failed to stop.", status_message="Status replication is", status_queries=["Replications[0].Status"], return_key="replication_config_arn", return_value=replication_config_arn, aws_conn_id=aws_conn_id, ) def hook(self) -> AwsGenericHook: return DmsHook( self.aws_conn_id, verify=self.verify, config=self.botocore_config, )
DmsReplicationStoppedTrigger
python
pytorch__pytorch
test/dynamo/test_buffers_override.py
{ "start": 100, "end": 1593 }
class ____(torch._dynamo.test_case.TestCase): def test_buffers_override(self): class SomeModel(nn.Module): def __init__(self): super().__init__() # Override buffers; should not cause breakage # this is because we use `named_buffers` for # static marking self.register_buffer("A", torch.ones(3, 3)) self.buffers = [] def forward(self): return self.A * torch.zeros(1, 1) model = SomeModel().to(torch.device("cpu")) compiled_model = torch.compile(model) self.assertEqual(compiled_model.A, torch.ones(3, 3)) compiled_model() def test_named_buffers_override(self): class SomeModel(nn.Module): def __init__(self): super().__init__() # Override buffers; should not cause breakage # but skip the marking static here since # named_buffers is overridden self.register_buffer("B", torch.ones(3, 3)) self.named_buffers = [] def forward(self): return self.B * torch.zeros(1, 1) model = SomeModel().to(torch.device("cpu")) compiled_model = torch.compile(model) self.assertEqual(compiled_model.B, torch.ones(3, 3)) compiled_model() if __name__ == "__main__": from torch._dynamo.test_case import run_tests run_tests()
TestBuffersOverride
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_bigtable.py
{ "start": 14993, "end": 21339 }
class ____: @pytest.mark.parametrize( ("missing_attribute", "project_id", "instance_id", "cluster_id", "nodes"), [ ("instance_id", PROJECT_ID, "", CLUSTER_ID, NODES), ("cluster_id", PROJECT_ID, INSTANCE_ID, "", NODES), ("nodes", PROJECT_ID, INSTANCE_ID, CLUSTER_ID, ""), ], ) @mock.patch("airflow.providers.google.cloud.operators.bigtable.BigtableHook") def test_empty_attribute(self, mock_hook, missing_attribute, project_id, instance_id, cluster_id, nodes): with pytest.raises(AirflowException) as ctx: BigtableUpdateClusterOperator( project_id=project_id, instance_id=instance_id, cluster_id=cluster_id, nodes=nodes, task_id="id", gcp_conn_id=GCP_CONN_ID, ) err = ctx.value assert str(err) == f"Empty parameter: {missing_attribute}" mock_hook.assert_not_called() @mock.patch("airflow.providers.google.cloud.operators.bigtable.BigtableHook") def test_updating_cluster_but_instance_does_not_exists(self, mock_hook): mock_hook.return_value.get_instance.return_value = None op = BigtableUpdateClusterOperator( project_id=PROJECT_ID, instance_id=INSTANCE_ID, cluster_id=CLUSTER_ID, nodes=NODES, task_id="id", gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) with pytest.raises(AirflowException) as ctx: op.execute(None) err = ctx.value assert str(err) == f"Dependency: instance '{INSTANCE_ID}' does not exist." mock_hook.assert_called_once_with( gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) mock_hook.return_value.update_cluster.assert_not_called() @mock.patch("airflow.providers.google.cloud.operators.bigtable.BigtableHook") def test_updating_cluster_but_instance_does_not_exists_empty_project_id(self, mock_hook): mock_hook.return_value.get_instance.return_value = None op = BigtableUpdateClusterOperator( instance_id=INSTANCE_ID, cluster_id=CLUSTER_ID, nodes=NODES, task_id="id", gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) with pytest.raises(AirflowException) as ctx: op.execute(None) err = ctx.value assert str(err) == f"Dependency: instance '{INSTANCE_ID}' does not exist." mock_hook.assert_called_once_with( gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) mock_hook.return_value.update_cluster.assert_not_called() @mock.patch("airflow.providers.google.cloud.operators.bigtable.BigtableHook") def test_updating_cluster_that_does_not_exists(self, mock_hook): instance = mock_hook.return_value.get_instance.return_value = mock.Mock(Instance) mock_hook.return_value.update_cluster.side_effect = mock.Mock( side_effect=google.api_core.exceptions.NotFound("Cluster not found.") ) op = BigtableUpdateClusterOperator( project_id=PROJECT_ID, instance_id=INSTANCE_ID, cluster_id=CLUSTER_ID, nodes=NODES, task_id="id", gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) with pytest.raises(AirflowException) as ctx: op.execute(None) err = ctx.value assert str(err) == f"Dependency: cluster '{CLUSTER_ID}' does not exist for instance '{INSTANCE_ID}'." 
mock_hook.assert_called_once_with( gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) mock_hook.return_value.update_cluster.assert_called_once_with( instance=instance, cluster_id=CLUSTER_ID, nodes=NODES ) @mock.patch("airflow.providers.google.cloud.operators.bigtable.BigtableHook") def test_updating_cluster_that_does_not_exists_empty_project_id(self, mock_hook): instance = mock_hook.return_value.get_instance.return_value = mock.Mock(Instance) mock_hook.return_value.update_cluster.side_effect = mock.Mock( side_effect=google.api_core.exceptions.NotFound("Cluster not found.") ) op = BigtableUpdateClusterOperator( instance_id=INSTANCE_ID, cluster_id=CLUSTER_ID, nodes=NODES, task_id="id", gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) with pytest.raises(AirflowException) as ctx: op.execute(None) err = ctx.value assert str(err) == f"Dependency: cluster '{CLUSTER_ID}' does not exist for instance '{INSTANCE_ID}'." mock_hook.assert_called_once_with( gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) mock_hook.return_value.update_cluster.assert_called_once_with( instance=instance, cluster_id=CLUSTER_ID, nodes=NODES ) @mock.patch("airflow.providers.google.cloud.operators.bigtable.BigtableHook") def test_different_error_reraised(self, mock_hook): op = BigtableUpdateClusterOperator( project_id=PROJECT_ID, instance_id=INSTANCE_ID, cluster_id=CLUSTER_ID, nodes=NODES, task_id="id", gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) instance = mock_hook.return_value.get_instance.return_value = mock.Mock(Instance) mock_hook.return_value.update_cluster.side_effect = mock.Mock( side_effect=google.api_core.exceptions.GoogleAPICallError("error") ) with pytest.raises(google.api_core.exceptions.GoogleAPICallError): op.execute(None) mock_hook.assert_called_once_with( gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) mock_hook.return_value.update_cluster.assert_called_once_with( instance=instance, cluster_id=CLUSTER_ID, nodes=NODES )
TestBigtableClusterUpdate
python
tensorflow__tensorflow
tensorflow/python/ops/resource_variable_ops.py
{ "start": 105665, "end": 105925 }
class ____(StructurePattern): """Represents a list of StructurePatterns.""" def __init__(self, *components): self.components = list(components) def __eq__(self, other): return isinstance(other, PList) and self.components == other.components
PList
python
pytorch__pytorch
torch/export/unflatten.py
{ "start": 39298, "end": 58135 }
class ____: def __init__( self, flat_graph: torch.fx.Graph, nodes: tuple[torch.fx.Node, ...], seen_nodes, seen_modules, seen_attrs, created_modules, parent, module_stack: list[tuple[str, Optional[str], int]], module_id, module_call_graph: dict[str, ModuleCallSignature], module: Optional[Union[torch.fx.GraphModule, UnflattenedModule]] = None, ): self.flat_graph = flat_graph self.nodes = nodes self.seen_nodes = seen_nodes self.seen_modules = seen_modules self.seen_attrs = seen_attrs self.created_modules = created_modules self.parent = parent self.module_stack = module_stack self.module_id = module_id self.module_call_graph = module_call_graph self.verbose = False self.fqn, ty, num_calls = self.module_stack[-1] # generate call name for self.fqn self.child_fqn = _call_name(self.fqn, num_calls + 1) self.module: Union[torch.fx.GraphModule, UnflattenedModule, InterpreterModule] if module is not None: self.module = module self.ivals = module.ivals if hasattr(module, "ivals") else {} # type: ignore[var-annotated] else: self.module = self.created_modules.get( self.fqn, InterpreterModule(torch.fx.Graph(), ty=ty), ) self.ivals = parent.ivals self.graph = self.module.graph # Mapping of nodes in the flat graph to nodes in this graph. self.node_map: dict[torch.fx.Node, torch.fx.Node] = {} self.node_to_placeholder = {} self.parent_call_module: Optional[torch.fx.Node] = None if parent is not None: accessor = _compute_accessor(parent.fqn, self.child_fqn) def create_module(fqn): path = f"{parent.fqn}.{fqn}" if parent.fqn else fqn if path in self.created_modules: return self.created_modules[path] submod = InterpreterModule(torch.fx.Graph(), ty=ty) self.created_modules[path] = submod return submod _add_submodule(parent.module, accessor, self.module, create_module) self.parent_call_module = parent.graph.call_module(accessor) if self.seen_modules[self.module_id]: base_module_frame = self.seen_modules[self.module_id][0] self.module._modules = base_module_frame.module._modules self.seen_modules[self.module_id].append( _SubmoduleEntry( parent_fqn=self.parent.fqn, parent_module=self.parent.module, parent_call_module=self.parent_call_module, fqn=self.fqn, call_idx=num_calls + 1, module=self.module, ) ) signature = module_call_graph.get(self.child_fqn) if signature is not None and self.parent is not None: assert signature.in_spec.num_children == 2 assert signature.in_spec.type is tuple args_spec, kwargs_spec = signature.in_spec.children() assert args_spec.type is tuple assert kwargs_spec.type is dict with self.graph.inserting_after(None): arg_nodes = [ self.graph.placeholder(f"_positional_arg_{idx}") for idx in range(args_spec.num_children) ] kwarg_nodes = {} for name in kwargs_spec.context: kwarg_nodes[name] = self.graph.placeholder(name) flat_args = _generate_flatten_spec( self.module, (tuple(arg_nodes), kwarg_nodes), signature.in_spec, ) for idx, arg in enumerate(signature.inputs): flat_arg_node = self.graph.create_node( op="call_function", target=operator.getitem, args=(flat_args, idx), name=( arg.name if not isinstance(arg, ConstantArgument) else f"_constant_{idx}" ), ) if isinstance(arg, ConstantArgument): continue if arg.name in self.seen_nodes: flat_arg_node.meta = copy.copy(self.seen_nodes[arg.name].meta) self.node_to_placeholder[self.seen_nodes[arg.name]] = ( flat_arg_node ) with self.parent.graph.inserting_before(self.parent_call_module): input_nodes: list[Optional[torch.fx.Node]] = [] for input in signature.inputs: if isinstance(input, ConstantArgument): input_nodes.append(input.value) # type: 
ignore[arg-type] elif input.name not in self.seen_nodes: input_nodes.append(None) else: assert isinstance( input, ( TensorArgument, SymIntArgument, SymBoolArgument, SymFloatArgument, ), ) input_nodes.append( self.parent.remap_input(self.seen_nodes[input.name]) ) inputs_node = _generate_unflatten( self.parent.module, input_nodes, signature.in_spec, ) args_node = self.parent.graph.call_function( operator.getitem, (inputs_node, 0) ) kwargs_node = self.parent.graph.call_function( operator.getitem, (inputs_node, 1) ) arg_nodes = [ self.parent.graph.call_function(operator.getitem, (args_node, i)) for i in range(args_spec.num_children) ] kwarg_nodes = { k: self.parent.graph.call_function( operator.getitem, (kwargs_node, k) ) for k in kwargs_spec.context } assert self.parent_call_module is not None # pyrefly: ignore [bad-assignment] self.parent_call_module.args = tuple(arg_nodes) self.parent_call_module.kwargs = kwarg_nodes # type: ignore[assignment] def add_placeholder(self, x): assert self.fqn != "", f"Cannot add placeholder {x} to root module" assert x.graph is self.flat_graph # x is not in subgraph, create a new placeholder for subgraph with self.graph.inserting_before(None): placeholder_node = self.graph.placeholder(x.name, type_expr=x.type) # copy all meta fields, even if some fields might be irrelevant for # the placeholder node placeholder_node.meta = copy.copy(x.meta) self.node_to_placeholder[x] = placeholder_node def copy_sym_call_function(self, x): # This only exists because we deduplicate sym_size nodes in the flat export graph, # and if preserve_module_call_signature is set, we may not be able to pass sym_size # nodes, or their downstream users, as inputs to submodule calls. # To avoid this we copy these call_function nodes with sym_type results. # This should however only be done for sym_type nodes - call_function nodes on tensors # should not be deduplicated in the first place. args = pytree.tree_map_only(torch.fx.Node, self.remap_input, x.args) kwargs = pytree.tree_map_only(torch.fx.Node, self.remap_input, x.kwargs) node = self.graph.call_function(x.target, args, kwargs) node.meta = copy.copy(x.meta) self.node_map[x] = node return node def remap_input(self, x): assert x.graph is self.flat_graph if x in self.node_map: return self.node_map[x] self.print(f"remap_input({x})") if x in self.node_to_placeholder: return self.node_to_placeholder[x] elif ( x.op == "placeholder" or self.module_call_graph.get(self.fqn) is None # allow placeholder creation if we are not preserving module call signature ): self.add_placeholder(x) if self.parent_call_module is not None: # Important to *prepend* the output to match how we are # inserting placeholder nodes. 
with self.parent.graph.inserting_before(self.parent_call_module): self.parent_call_module.insert_arg(0, self.parent.remap_input(x)) return self.node_to_placeholder[x] elif x.op == "call_function" and ( x.target in ( torch.ops.aten.sym_size.int, torch.ops.aten.item.default, torch.ops.aten.unbind.int, torch.ops.aten.sum.dim_IntList, torch.ops.aten.view.default, torch.ops.aten.diff.default, ) or (hasattr(x.target, "__module__") and x.target.__module__ == "_operator") ): # export deduplicates sym_size nodes, and may need to re-copy them # if module call signature needs to be preserved self.copy_sym_call_function(x) return self.node_map[x] elif self.module_call_graph.get(self.fqn) is not None: # x is reading the intermediate value of a mutation, so record it; # later we will find where it was created and perform the update return self.ivals.read(self, x) # type: ignore[operator, union-attr] else: raise RuntimeError( f"Could not run remap_input() on op type: {x.op} for node {x}" ) def finalize_outputs(self): self.created_modules.pop(self.fqn, None) orig_outputs = [] signature = self.module_call_graph.get(self.child_fqn) if signature is not None and self.parent is not None: for output in signature.outputs: if isinstance( output, ( TensorArgument, SymIntArgument, SymBoolArgument, SymFloatArgument, ConstantArgument, ), ): if output.name in self.seen_nodes: orig_outputs.append(self.seen_nodes[output.name]) else: orig_outputs.append(None) else: raise RuntimeError( f"Unsupported data type for output node: {output}" ) def get_actual_output_node(output): if output is None: return None seen_node = self.seen_nodes[output.name] if seen_node in self.node_map: return self.node_map[seen_node] elif seen_node in self.node_to_placeholder: return self.node_to_placeholder[seen_node] else: raise RuntimeError( f"Could not find output node {output}. Graph: {self.graph}" ) tree_out_node = _generate_unflatten( self.module, tuple(get_actual_output_node(output) for output in orig_outputs), signature.out_spec, ) parent_out: Optional[torch.fx.Node] = _generate_flatten_spec( self.parent.module, self.parent_call_module, signature.out_spec ) graph_outputs: Union[torch.fx.Node, list[torch.fx.Node]] = tree_out_node else: graph_outputs = [] # Iterate through nodes we have copied into self.graph. for orig_node in self.node_map: for user_node in orig_node.users: if user_node.name not in self.seen_nodes: # external user node, need to expose as an output orig_outputs.append(orig_node) graph_outputs.append(self.node_map[orig_node]) break parent_out = self.parent_call_module if len(graph_outputs) == 1: graph_outputs = graph_outputs[0] assert isinstance(graph_outputs, (list, torch.fx.Node)) self.graph.output(graph_outputs) # Rewrite outputs in parent module if parent_out is None: return parent_out.meta["val"] = ( graph_outputs.meta.get("val") if isinstance(graph_outputs, torch.fx.Node) else [o.meta.get("val") for o in graph_outputs] ) if len(orig_outputs) == 1 and signature is None: self.parent.node_map[orig_outputs[0]] = parent_out else: for i, orig_output in enumerate(orig_outputs): if orig_output is None: continue # Use Proxy to record getitem access. 
proxy_out = torch.fx.Proxy(parent_out)[i].node # type: ignore[index] proxy_out.meta["val"] = orig_output.meta.get("val") self.parent.node_map[orig_output] = proxy_out def copy_node(self, node): self.print("copying", node.format_node()) self.node_map[node] = self.graph.node_copy(node, self.remap_input) self.seen_nodes[node.name] = node def run_outer(self): for i, node in enumerate(self.flat_graph.nodes): self.print(i, node.meta.get("nn_module_stack"), node.format_node()) # Copy all graph inputs node_idx: int = 0 node = self.nodes[node_idx] while node.op == "placeholder": self.copy_node(node) node_idx += 1 node = self.nodes[node_idx] self.run_from(node_idx) # Copy graph outputs for node in self.flat_graph.nodes: if node.op == "output": self.copy_node(node) def print(self, *args, **kwargs): if self.verbose: # pyrefly: ignore [not-iterable] print(*args, **kwargs) def run_from(self, node_idx): module_idx = 0 # Walk through the graph, building up a new graph with the right submodules while node_idx < len(self.nodes): node = self.nodes[node_idx] assert node.op != "placeholder" self.print() self.print("STEP", node_idx, node.format_node()) self.print(self.module_stack) depth = len(self.module_stack) if node.op == "output": if depth == 1: # We want the output node of the original graph to be handled # specially by the outermost stack frame (in run_outer). So # skip finalization here. return node_idx # We've reached the end of the graph. Wrap up all the existing stack frames. self.finalize_outputs() return node_idx if len(node.meta.get("nn_module_stack", {})) == 0: raise RuntimeError(f"Unable to find nn_module_stack for node {node}") nn_module_stack = node.meta["nn_module_stack"] from torch._export.passes._node_metadata_hook import ( _EMPTY_NN_MODULE_STACK_KEY, ) if ( len(nn_module_stack) == 1 and _EMPTY_NN_MODULE_STACK_KEY in nn_module_stack ): # Empty case from the node_metadata_hook node_module_stack = self.module_stack else: node_module_stack = [ ( path, ty if path else None, int(k.split("@")[-1]) if "@" in k else 0, ) for k, (path, ty) in node.meta["nn_module_stack"].items() ] if node_module_stack[:depth] != self.module_stack: # This means that the current module is done executing and the # current node is the beginning of a new module. # # In this case, we should finalize this module and return without # incrementing the node counter. self.finalize_outputs() self.print("outlining", self.fqn) self.print(self.graph) return node_idx assert node_module_stack is not None if _is_prefix(self.module_stack, node_module_stack): # This means that the current node represents the execution of a new # module. next_module = node_module_stack[depth] self.print("Creating new stack frame for", next_module) # Run a nested version of module outliner from the current node # counter. Once it is complete, continue from that point. next_module_key = list(node.meta["nn_module_stack"].keys())[depth] node_idx = _ModuleFrame( self.flat_graph, self.nodes, self.seen_nodes, self.seen_modules, self.seen_attrs, self.created_modules, self, self.module_stack + [next_module], next_module_key.split("@")[0], self.module_call_graph, ).run_from(node_idx) module_idx += 1 continue # The only remaining possibility is that we are in the right stack # frame. Copy the node into this frame's graph and increment the node counter. 
assert node_module_stack == self.module_stack if node.op == "get_attr": # this must be a graph argument for a HOP self.seen_attrs[self.child_fqn].add(node.target) self.copy_node(node) # pyrefly: ignore [unsupported-operation] node_idx += 1 @dataclass
_ModuleFrame
python
tensorflow__tensorflow
tensorflow/python/util/lock_util.py
{ "start": 738, "end": 4253 }
class ____(object): """A lock to allow many members of a group to access a resource exclusively. This lock provides a way to allow access to a resource by multiple threads belonging to a logical group at the same time, while restricting access to threads from all other groups. You can think of this as an extension of a reader-writer lock, where you allow multiple writers at the same time. We made it generic to support multiple groups instead of just two - readers and writers. Simple usage example with two groups accessing the same resource: ```python lock = GroupLock(num_groups=2) # In a member of group 0: with lock.group(0): # do stuff, access the resource # ... # In a member of group 1: with lock.group(1): # do stuff, access the resource # ... ``` Using as a context manager with `.group(group_id)` is the easiest way. You can also use the `acquire` and `release` method directly. """ __slots__ = ["_ready", "_num_groups", "_group_member_counts"] def __init__(self, num_groups=2): """Initialize a group lock. Args: num_groups: The number of groups that will be accessing the resource under consideration. Should be a positive number. Returns: A group lock that can then be used to synchronize code. Raises: ValueError: If num_groups is less than 1. """ if num_groups < 1: raise ValueError( "Argument `num_groups` must be a positive integer. " f"Received: num_groups={num_groups}") self._ready = threading.Condition(threading.Lock()) self._num_groups = num_groups self._group_member_counts = [0] * self._num_groups def group(self, group_id): """Enter a context where the lock is with group `group_id`. Args: group_id: The group for which to acquire and release the lock. Returns: A context manager which will acquire the lock for `group_id`. """ self._validate_group_id(group_id) return self._Context(self, group_id) def acquire(self, group_id): """Acquire the group lock for a specific group `group_id`.""" self._validate_group_id(group_id) self._ready.acquire() while self._another_group_active(group_id): self._ready.wait() self._group_member_counts[group_id] += 1 self._ready.release() def release(self, group_id): """Release the group lock for a specific group `group_id`.""" self._validate_group_id(group_id) self._ready.acquire() self._group_member_counts[group_id] -= 1 if self._group_member_counts[group_id] == 0: self._ready.notify_all() self._ready.release() def _another_group_active(self, group_id): return any( c > 0 for g, c in enumerate(self._group_member_counts) if g != group_id) def _validate_group_id(self, group_id): if group_id < 0 or group_id >= self._num_groups: raise ValueError( "Argument `group_id` should verify `0 <= group_id < num_groups` " f"(with `num_groups={self._num_groups}`). " f"Received: group_id={group_id}") class _Context(object): """Context manager helper for `GroupLock`.""" __slots__ = ["_lock", "_group_id"] def __init__(self, lock, group_id): self._lock = lock self._group_id = group_id def __enter__(self): self._lock.acquire(self._group_id) def __exit__(self, type_arg, value_arg, traceback_arg): del type_arg, value_arg, traceback_arg self._lock.release(self._group_id)
GroupLock
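A hedged usage sketch for the lock above (assuming the GroupLock class is in scope); members of the same group may hold the lock concurrently while the other group waits:

import threading
import time

lock = GroupLock(num_groups=2)  # assumes the class defined above

def worker(group_id):
    with lock.group(group_id):  # blocks while any other group is active
        time.sleep(0.01)        # simulate work on the shared resource

threads = [threading.Thread(target=worker, args=(i % 2,)) for i in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()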
python
pytorch__pytorch
test/jit/test_hash.py
{ "start": 386, "end": 3409 }
class ____(JitTestCase): def test_hash_tuple(self): def fn(t1: Tuple[int, int], t2: Tuple[int, int]) -> bool: return hash(t1) == hash(t2) self.checkScript(fn, ((1, 2), (1, 2))) self.checkScript(fn, ((1, 2), (3, 4))) self.checkScript(fn, ((1, 2), (2, 1))) def test_hash_tuple_nested_unhashable_type(self): # Tuples may contain unhashable types like `list`, check that we error # properly in that case. @torch.jit.script def fn_unhashable(t1: Tuple[int, List[int]]): return hash(t1) with self.assertRaisesRegexWithHighlight(RuntimeError, "unhashable", "hash"): fn_unhashable((1, [1])) def test_hash_tensor(self): """Tensors should hash by identity""" def fn(t1, t2): return hash(t1) == hash(t2) tensor1 = torch.tensor(1) tensor1_clone = torch.tensor(1) tensor2 = torch.tensor(2) self.checkScript(fn, (tensor1, tensor1)) self.checkScript(fn, (tensor1, tensor1_clone)) self.checkScript(fn, (tensor1, tensor2)) def test_hash_none(self): def fn(): n1 = None n2 = None return hash(n1) == hash(n2) self.checkScript(fn, ()) def test_hash_bool(self): def fn(b1: bool, b2: bool): return hash(b1) == hash(b2) self.checkScript(fn, (True, False)) self.checkScript(fn, (True, True)) self.checkScript(fn, (False, True)) self.checkScript(fn, (False, False)) def test_hash_float(self): def fn(f1: float, f2: float): return hash(f1) == hash(f2) self.checkScript(fn, (1.2345, 1.2345)) self.checkScript(fn, (1.2345, 6.789)) self.checkScript(fn, (1.2345, float("inf"))) self.checkScript(fn, (float("inf"), float("inf"))) self.checkScript(fn, (1.2345, float("nan"))) self.checkScript(fn, (float("nan"), float("inf"))) def test_hash_int(self): def fn(i1: int, i2: int): return hash(i1) == hash(i2) self.checkScript(fn, (123, 456)) self.checkScript(fn, (123, 123)) self.checkScript(fn, (123, -123)) self.checkScript(fn, (-123, -123)) self.checkScript(fn, (123, 0)) def test_hash_string(self): def fn(s1: str, s2: str): return hash(s1) == hash(s2) self.checkScript(fn, ("foo", "foo")) self.checkScript(fn, ("foo", "bar")) self.checkScript(fn, ("foo", "")) def test_hash_device(self): def fn(d1: torch.device, d2: torch.device): return hash(d1) == hash(d2) gpu0 = torch.device("cuda:0") gpu1 = torch.device("cuda:1") cpu = torch.device("cpu") self.checkScript(fn, (gpu0, gpu0)) self.checkScript(fn, (gpu0, gpu1)) self.checkScript(fn, (gpu0, cpu)) self.checkScript(fn, (cpu, cpu)) if __name__ == "__main__": raise_on_run_directly("test/test_jit.py")
TestHash
python
scipy__scipy
scipy/signal/tests/test_signaltools.py
{ "start": 133306, "end": 137963 }
class ____: def test_bad_args(self, xp): x = xp.asarray([1.0 + 0.0j]) assert_raises(ValueError, hilbert, x) x = xp.arange(8.0) assert_raises(ValueError, hilbert, x, N=0) def test_hilbert_theoretical(self, xp): # test cases by Ariel Rokem decimal = 14 pi = xp.pi t = xp.arange(0, 2 * pi, pi / 256, dtype=xp.float64) a0 = xp.sin(t) a1 = xp.cos(t) a2 = xp.sin(2 * t) a3 = xp.cos(2 * t) a = xp.stack([a0, a1, a2, a3]) h = hilbert(a) h_abs = xp.abs(h) h_angle = xp.atan2(xp.imag(h), xp.real(h)) # np.angle(h) h_real = xp.real(h) # The real part should be equal to the original signals: assert_almost_equal(h_real, a, decimal) # The absolute value should be one everywhere, for this input: assert_almost_equal(h_abs, xp.ones(a.shape), decimal) # For the 'slow' sine - the phase should go from -pi/2 to pi/2 in # the first 256 bins: assert_almost_equal(h_angle[0, :256], xp.arange(-pi / 2, pi / 2, pi / 256, dtype=xp.float64), decimal) # For the 'slow' cosine - the phase should go from 0 to pi in the # same interval: assert_almost_equal( h_angle[1, :256], xp.arange(0, pi, pi / 256, dtype=xp.float64), decimal) # The 'fast' sine should make this phase transition in half the time: assert_almost_equal(h_angle[2, :128], xp.arange(-pi / 2, pi / 2, pi / 128, dtype=xp.float64), decimal) # Ditto for the 'fast' cosine: assert_almost_equal( h_angle[3, :128], xp.arange(0, pi, pi / 128, dtype=xp.float64), decimal) # The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia assert_almost_equal(xp.imag(h[1, :]), a0, decimal) def test_hilbert_axisN(self, xp): # tests for axis and N arguments a = xp.reshape(xp.arange(18, dtype=xp.float64), (3, 6)) # test axis aa = hilbert(a, axis=-1) xp_assert_equal(hilbert(a.T, axis=0), aa.T) # test 1d assert_almost_equal(hilbert(a[0, :]), aa[0, :], 14) # test N aan = hilbert(a, N=20, axis=-1) assert aan.shape == (3, 20) assert hilbert(a.T, N=20, axis=0).shape == (20, 3) # the next test is just a regression test, # no idea whether numbers make sense a0hilb = np.array([0.000000000000000e+00 - 1.72015830311905j, 1.000000000000000e+00 - 2.047794505137069j, 1.999999999999999e+00 - 2.244055555687583j, 3.000000000000000e+00 - 1.262750302935009j, 4.000000000000000e+00 - 1.066489252384493j, 5.000000000000000e+00 + 2.918022706971047j, 8.881784197001253e-17 + 3.845658908989067j, -9.444121133484362e-17 + 0.985044202202061j, -1.776356839400251e-16 + 1.332257797702019j, -3.996802888650564e-16 + 0.501905089898885j, 1.332267629550188e-16 + 0.668696078880782j, -1.192678053963799e-16 + 0.235487067862679j, -1.776356839400251e-16 + 0.286439612812121j, 3.108624468950438e-16 + 0.031676888064907j, 1.332267629550188e-16 - 0.019275656884536j, -2.360035624836702e-16 - 0.1652588660287j, 0.000000000000000e+00 - 0.332049855010597j, 3.552713678800501e-16 - 0.403810179797771j, 8.881784197001253e-17 - 0.751023775297729j, 9.444121133484362e-17 - 0.79252210110103j]) a0hilb = xp.asarray(a0hilb) assert_almost_equal(aan[0, :], a0hilb, 14, err_msg='N regression') def test_hilbert_axis_3d(self, xp): a = xp.reshape(xp.arange(3 * 5 * 7, dtype=xp.float64), (3, 5, 7)) # test axis aa = hilbert(a, axis=-1) for axis in [0, 1]: aap = hilbert(xp.moveaxis(a, -1, axis), axis=axis) aap = xp.moveaxis(aap, axis, -1) xp_assert_equal(aa, aap) @pytest.mark.parametrize('dtype', ['float32', 'float64']) def test_hilbert_types(self, dtype, xp): dtype = getattr(xp, dtype) in_typed = xp.zeros(8, dtype=dtype) assert xp.real(hilbert(in_typed)).dtype == dtype @make_xp_test_case(hilbert2)
TestHilbert
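A short sketch of the analytic-signal property that test_hilbert_theoretical checks, in plain NumPy/SciPy: the envelope of a pure sinusoid is ~1 everywhere:

import numpy as np
from scipy.signal import hilbert

t = np.arange(0, 2 * np.pi, np.pi / 256)        # exactly one period, 512 samples
h = hilbert(np.sin(t))                          # analytic signal sin(t) - 1j*cos(t)
print(np.allclose(np.abs(h), 1.0, atol=1e-3))   # True: unit envelope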
python
sqlalchemy__sqlalchemy
test/typing/plain_files/orm/typed_queries.py
{ "start": 1298, "end": 10886 }
class ____(Base): __tablename__ = "user" id: Mapped[int] = mapped_column(primary_key=True) name: Mapped[str] data: Mapped[str] user_table = Table( "user", MetaData(), Column("id", Integer, primary_key=True), Column("name", String, primary_key=True), ) session = Session() e = create_engine("sqlite://") connection = e.connect() def t_select_1() -> None: stmt = select(User.id, User.name).filter(User.id == 5) assert_type(stmt, Select[int, str]) result = session.execute(stmt) assert_type(result, Result[int, str]) def t_select_2() -> None: stmt = ( select(User) .filter(User.id == 5) .limit(1) .offset(3) .offset(None) .limit(None) .limit(User.id) .offset(User.id) .fetch(1) .fetch(None) .fetch(User.id) ) assert_type(stmt, Select[User]) result = session.execute(stmt) assert_type(result, Result[User]) def t_select_3() -> None: ua = aliased(User) # this will fail at runtime, but as we at the moment see aliased(_T) # as _T, typing tools see the constructor as fine. # this line would ideally have a typing error but we'd need the ability # for aliased() to return some namespace of User that's not User. # AsAliased superclass type was tested for this but it had its own # awkwardnesses that aren't really worth it ua(id=1, name="foo") assert_type(ua, type[User]) stmt = select(ua.id, ua.name).filter(User.id == 5) assert_type(stmt, Select[int, str]) result = session.execute(stmt) assert_type(result, Result[int, str]) def t_select_4() -> None: ua = aliased(User) stmt = select(ua, User).filter(User.id == 5) assert_type(stmt, Select[User, User]) result = session.execute(stmt) assert_type(result, Result[User, User]) def t_legacy_query_single_entity() -> None: q1 = session.query(User).filter(User.id == 5) assert_type(q1, Query[User]) assert_type(q1.one(), User) assert_type(q1.all(), list[User]) # mypy switches to builtins.list for some reason here assert_type(q1.only_return_tuples(True).all(), list[Row[User]]) assert_type(q1.tuples().all(), list[tuple[User]]) def t_legacy_query_cols_1() -> None: q1 = session.query(User.id, User.name).filter(User.id == 5) assert_type(q1, RowReturningQuery[int, str]) assert_type(q1.one(), Row[int, str]) r1 = q1.one() x, y = r1 assert_type(x, int) assert_type(y, str) def t_legacy_query_cols_tupleq_1() -> None: q1 = session.query(User.id, User.name).filter(User.id == 5) assert_type(q1, RowReturningQuery[int, str]) q2 = q1.tuples() assert_type(q2.one(), tuple[int, str]) r1 = q2.one() x, y = r1 assert_type(x, int) assert_type(y, str) def t_legacy_query_cols_1_with_entities() -> None: q1 = session.query(User).filter(User.id == 5) assert_type(q1, Query[User]) q2 = q1.with_entities(User.id, User.name) assert_type(q2, RowReturningQuery[int, str]) assert_type(q2.one(), Row[int, str]) r1 = q2.one() x, y = r1 assert_type(x, int) assert_type(y, str) def t_select_with_only_cols() -> None: q1 = select(User).where(User.id == 5) assert_type(q1, Select[User]) q2 = q1.with_only_columns(User.id, User.name) assert_type(q2, Select[int, str]) row = connection.execute(q2).one() assert_type(row, Row[int, str]) x, y = row assert_type(x, int) assert_type(y, str) def t_legacy_query_cols_2() -> None: a1 = aliased(User) q1 = session.query(User, a1, User.name).filter(User.id == 5) assert_type(q1, RowReturningQuery[User, User, str]) assert_type(q1.one(), Row[User, User, str]) r1 = q1.one() x, y, z = r1 assert_type(x, User) assert_type(y, User) assert_type(z, str) def t_legacy_query_cols_2_with_entities() -> None: q1 = session.query(User) assert_type(q1, Query[User]) a1 = aliased(User) q2 = q1.with_entities(User, 
a1, User.name).filter(User.id == 5) assert_type(q2, RowReturningQuery[User, User, str]) assert_type(q2.one(), Row[User, User, str]) r1 = q2.one() x, y, z = r1 assert_type(x, User) assert_type(y, User) assert_type(z, str) def t_select_add_col_loses_type() -> None: q1 = select(User.id, User.name).filter(User.id == 5) q2 = q1.add_columns(User.data) # note this should not match Select assert_type(q2, Select[Unpack[tuple[Any, ...]]]) def t_legacy_query_add_col_loses_type() -> None: q1 = session.query(User.id, User.name).filter(User.id == 5) q2 = q1.add_columns(User.data) # this should match only Any assert_type(q2, Query[Any]) ua = aliased(User) q3 = q1.add_entity(ua) assert_type(q3, Query[Any]) def t_legacy_query_scalar_subquery() -> None: """scalar subquery should receive the type if first element is a column only""" q1 = session.query(User.id) q2 = q1.scalar_subquery() # this should be int but mypy can't see it due to the # overload that tries to match an entity. assert_type(q2, ScalarSelect[Any]) q3 = session.query(User) q4 = q3.scalar_subquery() assert_type(q4, ScalarSelect[Any]) q5 = session.query(User, User.name) q6 = q5.scalar_subquery() assert_type(q6, ScalarSelect[Any]) # try to simulate the problem with select() q7 = session.query(User).only_return_tuples(True) q8 = q7.scalar_subquery() assert_type(q8, ScalarSelect[Any]) def t_select_scalar_subquery() -> None: """scalar subquery should receive the type if first element is a column only""" s1 = select(User.id) s2 = s1.scalar_subquery() # this should be int but mypy can't see it due to the # overload that tries to match an entity. assert_type(s2, ScalarSelect[Any]) s3 = select(User) s4 = s3.scalar_subquery() # it's more important that mypy doesn't get a false positive of # 'User' here assert_type(s4, ScalarSelect[Any]) def t_select_w_core_selectables() -> None: """things that come from .c. or are FromClause objects currently are not typed. Make sure we are still getting Select at least. """ s1 = select(User.id, User.name).subquery() assert_type(s1.c.name, KeyedColumnElement[Any]) s2 = select(User.id, s1.c.name) # this one unfortunately is not working in mypy. # pylance gets the correct type # EXPECTED_TYPE: Select[tuple[int, Any]] # when experimenting with having a separate TypedSelect class for typing, # mypy would downgrade to Any rather than picking the basemost type. # with typing integrated into Select etc. we can at least get a Select # object back. 
assert_type(s2, Select[Unpack[tuple[Any, ...]]]) # so a fully explicit type may be given s2_typed: Select[tuple[int, str]] = select(User.id, s1.c.name) assert_type(s2_typed, Select[tuple[int, str]]) # plain FromClause etc we at least get Select s3 = select(s1) assert_type(s3, Select[Unpack[tuple[Any, ...]]]) t1 = User.__table__ assert t1 is not None assert_type(t1, FromClause) s4 = select(t1) assert_type(s4, Select[Unpack[tuple[Any, ...]]]) def t_dml_insert() -> None: s1 = insert(User).returning(User.id, User.name) r1 = session.execute(s1) assert_type(r1, Result[int, str]) s2 = insert(User).returning(User) r2 = session.execute(s2) assert_type(r2, Result[User]) s3 = insert(User).returning(func.foo(), column("q")) assert_type(s3, ReturningInsert[Unpack[tuple[Any, ...]]]) r3 = session.execute(s3) assert_type(r3, Result[Unpack[tuple[Any, ...]]]) def t_dml_bare_insert() -> None: s1 = insert(User) r1 = session.execute(s1) assert_type(r1, Result[Unpack[tuple[Any, ...]]]) def t_dml_bare_update() -> None: s1 = update(User) r1 = session.execute(s1) assert_type(r1, Result[Unpack[tuple[Any, ...]]]) def t_dml_update_with_values() -> None: s1 = update(User).values({User.id: 123, User.data: "value"}) r1 = session.execute(s1) assert_type(r1, Result[Unpack[tuple[Any, ...]]]) def t_dml_bare_delete() -> None: s1 = delete(User) r1 = session.execute(s1) assert_type(r1, Result[Unpack[tuple[Any, ...]]]) def t_dml_update() -> None: s1 = update(User).returning(User.id, User.name) r1 = session.execute(s1) assert_type(r1, Result[int, str]) def t_dml_delete() -> None: s1 = delete(User).returning(User.id, User.name) r1 = session.execute(s1) assert_type(r1, Result[int, str]) def t_from_statement() -> None: t = text("select * from user") assert_type(t, TextClause) select(User).from_statement(t) ts = text("select * from user").columns(User.id, User.name) assert_type(ts, TextualSelect) select(User).from_statement(ts) ts2 = text("select * from user").columns( user_table.c.id, user_table.c.name ) assert_type(ts2, TextualSelect) select(User).from_statement(ts2) def t_aliased_fromclause() -> None: a1 = aliased(User, user_table) a2 = aliased(User, user_table.alias()) a3 = aliased(User, join(user_table, user_table.alias())) a4 = aliased(user_table) assert_type(a1, type[User]) assert_type(a2, type[User]) assert_type(a3, type[User]) assert_type(a4, FromClause) def test_select_from() -> None: select(1).select_from(User).exists() exists(1).select_from(User).select()
User
python
getsentry__sentry
src/sentry/models/rule.py
{ "start": 6025, "end": 6431 }
class ____(Model): __relocation_scope__ = RelocationScope.Organization rule = FlexibleForeignKey("sentry.Rule") organization = FlexibleForeignKey("sentry.Organization") disable_date = models.DateTimeField() opted_out = models.BooleanField(default=False) sent_initial_email_date = models.DateTimeField(null=True) sent_final_email_date = models.DateTimeField(null=True)
NeglectedRule
python
astropy__astropy
astropy/table/tests/test_index.py
{ "start": 1268, "end": 28666 }
class ____(SetupData):
    def _setup(self, main_col, table_types):
        super()._setup(table_types)
        self.main_col = main_col
        if isinstance(main_col, u.Quantity):
            self._table_type = QTable
        if not isinstance(main_col, list):
            self._column_type = lambda x: x  # don't change mixin type
        self.mutable = isinstance(main_col, (list, u.Quantity))

    def make_col(self, name, lst):
        return self._column_type(lst, name=name)

    def make_val(self, val):
        if isinstance(self.main_col, Time):
            return Time(val, format="jyear")
        return val

    @property
    def t(self):
        if not hasattr(self, "_t"):
            # Note that order of columns is important, and the 'a' column is
            # last to ensure that the index column does not need to be the first
            # column (as was discovered in #10025). Most testing uses 'a' and
            # ('a', 'b') for the columns.
            self._t = self._table_type()
            self._t["b"] = self._column_type([4.0, 5.1, 6.2, 7.0, 1.1])
            self._t["c"] = self._column_type(["7", "8", "9", "10", "11"])
            self._t["a"] = self._column_type(self.main_col)
        return self._t

    @pytest.mark.parametrize("composite", [False, True])
    def test_table_index(self, main_col, table_types, composite, engine):
        self._setup(main_col, table_types)
        t = self.t
        t.add_index(("a", "b") if composite else "a", engine=engine)
        assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])

        if not self.mutable:
            return

        # test altering table columns
        t["a"][0] = 4
        t.add_row((6.0, "7", 6))
        t["a"][3] = 10
        t.remove_row(2)
        t.add_row((5.0, "9", 4))

        assert_col_equal(t["a"], np.array([4, 2, 10, 5, 6, 4]))
        assert np.allclose(t["b"], np.array([4.0, 5.1, 7.0, 1.1, 6.0, 5.0]))
        assert np.all(t["c"].data == np.array(["7", "8", "10", "11", "7", "9"]))
        index = t.indices[0]
        ll = list(index.data.items())

        if composite:
            assert np.all(
                ll
                == [
                    ((2, 5.1), [1]),
                    ((4, 4.0), [0]),
                    ((4, 5.0), [5]),
                    ((5, 1.1), [3]),
                    ((6, 6.0), [4]),
                    ((10, 7.0), [2]),
                ]
            )
        else:
            assert np.all(
                ll == [((2,), [1]), ((4,), [0, 5]), ((5,), [3]), ((6,), [4]), ((10,), [2])]
            )
        t.remove_indices("a")
        assert len(t.indices) == 0

    def test_table_slicing(self, main_col, table_types, engine):
        self._setup(main_col, table_types)
        t = self.t
        t.add_index("a", engine=engine)
        assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])

        for slice_ in ([0, 2], np.array([0, 2])):
            t2 = t[slice_]
            # t2 should retain an index on column 'a'
            assert len(t2.indices) == 1
            assert_col_equal(t2["a"], [1, 3])

            # the index in t2 should reorder row numbers after slicing
            assert np.all(t2.indices[0].sorted_data() == [0, 1])
            # however, this index should be a deep copy of t1's index
            assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])

    def test_remove_rows(self, main_col, table_types, engine):
        self._setup(main_col, table_types)
        if not self.mutable:
            return
        t = self.t
        t.add_index("a", engine=engine)

        # remove individual row
        t2 = t.copy()
        t2.remove_rows(2)
        assert_col_equal(t2["a"], [1, 2, 4, 5])
        assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3])

        # remove by list, ndarray, or slice
        for cut in ([0, 2, 4], np.array([0, 2, 4]), slice(0, 5, 2)):
            t2 = t.copy()
            t2.remove_rows(cut)
            assert_col_equal(t2["a"], [2, 4])
            assert np.all(t2.indices[0].sorted_data() == [0, 1])

        with pytest.raises(ValueError):
            t.remove_rows((0, 2, 4))

    def test_col_get_slice(self, main_col, table_types, engine):
        self._setup(main_col, table_types)
        t = self.t
        t.add_index("a", engine=engine)

        # get slice
        t2 = t[1:3]  # table slice
        assert_col_equal(t2["a"], [2, 3])
        assert np.all(t2.indices[0].sorted_data() == [0, 1])

        col_slice = t["a"][1:3]
        assert_col_equal(col_slice, [2, 3])

        # true column slices discard indices
        if isinstance(t["a"], BaseColumn):
            assert len(col_slice.info.indices) == 0

        # take slice of slice
        t2 = t[::2]
        assert_col_equal(t2["a"], np.array([1, 3, 5]))
        t3 = t2[::-1]
        assert_col_equal(t3["a"], np.array([5, 3, 1]))
        assert np.all(t3.indices[0].sorted_data() == [2, 1, 0])
        t3 = t2[:2]
        assert_col_equal(t3["a"], np.array([1, 3]))
        assert np.all(t3.indices[0].sorted_data() == [0, 1])

        # out-of-bound slices
        for t_empty in (t2[3:], t2[2:1], t3[2:]):
            assert len(t_empty["a"]) == 0
            assert np.all(t_empty.indices[0].sorted_data() == [])

        if self.mutable:
            # get boolean mask
            mask = t["a"] % 2 == 1
            t2 = t[mask]
            assert_col_equal(t2["a"], [1, 3, 5])
            assert np.all(t2.indices[0].sorted_data() == [0, 1, 2])

    def test_col_set_slice(self, main_col, table_types, engine):
        self._setup(main_col, table_types)
        if not self.mutable:
            return
        t = self.t
        t.add_index("a", engine=engine)

        # set slice
        t2 = t.copy()
        t2["a"][1:3] = np.array([6, 7])
        assert_col_equal(t2["a"], np.array([1, 6, 7, 4, 5]))
        assert np.all(t2.indices[0].sorted_data() == [0, 3, 4, 1, 2])

        # change original table via slice reference
        t2 = t.copy()
        t3 = t2[1:3]
        assert_col_equal(t3["a"], np.array([2, 3]))
        assert np.all(t3.indices[0].sorted_data() == [0, 1])
        t3["a"][0] = 5
        assert_col_equal(t3["a"], np.array([5, 3]))
        assert_col_equal(t2["a"], np.array([1, 5, 3, 4, 5]))
        assert np.all(t3.indices[0].sorted_data() == [1, 0])
        assert np.all(t2.indices[0].sorted_data() == [0, 2, 3, 1, 4])

        # set boolean mask
        t2 = t.copy()
        mask = t["a"] % 2 == 1
        t2["a"][mask] = 0.0
        assert_col_equal(t2["a"], [0, 2, 0, 4, 0])
        assert np.all(t2.indices[0].sorted_data() == [0, 2, 4, 1, 3])

    def test_multiple_slices(self, main_col, table_types, engine):
        self._setup(main_col, table_types)
        if not self.mutable:
            return
        t = self.t
        t.add_index("a", engine=engine)

        for i in range(6, 51):
            t.add_row((1.0, "A", i))

        assert_col_equal(t["a"], list(range(1, 51)))
        assert np.all(t.indices[0].sorted_data() == list(range(50)))

        evens = t[::2]
        assert np.all(evens.indices[0].sorted_data() == list(range(25)))
        reverse = evens[::-1]
        index = reverse.indices[0]
        assert (index.start, index.stop, index.step) == (48, -2, -2)
        assert np.all(index.sorted_data() == list(range(24, -1, -1)))

        # modify slice of slice
        reverse[-10:] = 0
        expected = np.array(list(range(1, 51)))
        expected[:20][expected[:20] % 2 == 1] = 0
        assert_col_equal(t["a"], expected)
        assert_col_equal(evens["a"], expected[::2])
        assert_col_equal(reverse["a"], expected[::2][::-1])

        # first ten evens are now zero
        assert np.all(
            t.indices[0].sorted_data()
            == (
                [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
                + list(range(20, 50))
            )
        )
        assert np.all(evens.indices[0].sorted_data() == list(range(25)))
        assert np.all(reverse.indices[0].sorted_data() == list(range(24, -1, -1)))

        # try different step sizes of slice
        t2 = t[1:20:2]
        assert_col_equal(t2["a"], [2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
        assert np.all(t2.indices[0].sorted_data() == list(range(10)))
        t3 = t2[::3]
        assert_col_equal(t3["a"], [2, 8, 14, 20])
        assert np.all(t3.indices[0].sorted_data() == [0, 1, 2, 3])
        t4 = t3[2::-1]
        assert_col_equal(t4["a"], [14, 8, 2])
        assert np.all(t4.indices[0].sorted_data() == [2, 1, 0])

    def test_sort(self, main_col, table_types, engine):
        self._setup(main_col, table_types)
        t = self.t[::-1]  # reverse table
        assert_col_equal(t["a"], [5, 4, 3, 2, 1])
        t.add_index("a", engine=engine)
        assert np.all(t.indices[0].sorted_data() == [4, 3, 2, 1, 0])

        if not self.mutable:
            return

        # sort table by column a
        t2 = t.copy()
        t2.sort("a")
        assert_col_equal(t2["a"], [1, 2, 3, 4, 5])
        assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4])

        # sort table by primary key
        t2 = t.copy()
        t2.sort()
        assert_col_equal(t2["a"], [1, 2, 3, 4, 5])
        assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4])

    def test_insert_row(self, main_col, table_types, engine):
        self._setup(main_col, table_types)
        if not self.mutable:
            return
        t = self.t
        t.add_index("a", engine=engine)
        t.insert_row(2, (1.0, "12", 6))
        assert_col_equal(t["a"], [1, 2, 6, 3, 4, 5])
        assert np.all(t.indices[0].sorted_data() == [0, 1, 3, 4, 5, 2])
        t.insert_row(1, (4.0, "13", 0))
        assert_col_equal(t["a"], [1, 0, 2, 6, 3, 4, 5])
        assert np.all(t.indices[0].sorted_data() == [1, 0, 2, 4, 5, 6, 3])

    def test_index_modes(self, main_col, table_types, engine):
        self._setup(main_col, table_types)
        t = self.t
        t.add_index("a", engine=engine)

        # first, no special mode
        assert len(t[[1, 3]].indices) == 1
        assert len(t[::-1].indices) == 1
        assert len(self._table_type(t).indices) == 1
        assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
        t2 = t.copy()

        # non-copy mode
        with t.index_mode("discard_on_copy"):
            assert len(t[[1, 3]].indices) == 0
            assert len(t[::-1].indices) == 0
            assert len(self._table_type(t).indices) == 0
            assert len(t2.copy().indices) == 1  # mode should only affect t

        # make sure non-copy mode is exited correctly
        assert len(t[[1, 3]].indices) == 1

        if not self.mutable:
            return

        # non-modify mode
        with t.index_mode("freeze"):
            assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
            t["a"][0] = 6
            assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
            t.add_row((1.5, "12", 2))
            assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
            t.remove_rows([1, 3])
            assert np.all(t.indices[0].sorted_data() == [0, 1, 2, 3, 4])
            assert_col_equal(t["a"], [6, 3, 5, 2])
            # mode should only affect t
            assert np.all(t2.indices[0].sorted_data() == [0, 1, 2, 3, 4])
            t2["a"][0] = 6
            assert np.all(t2.indices[0].sorted_data() == [1, 2, 3, 4, 0])

        # make sure non-modify mode is exited correctly
        assert np.all(t.indices[0].sorted_data() == [3, 1, 2, 0])

        if isinstance(t["a"], BaseColumn):
            assert len(t["a"][::-1].info.indices) == 0
            with t.index_mode("copy_on_getitem"):
                assert len(t["a"][[1, 2]].info.indices) == 1
                # mode should only affect t
                assert len(t2["a"][[1, 2]].info.indices) == 0

            assert len(t["a"][::-1].info.indices) == 0
            assert len(t2["a"][::-1].info.indices) == 0

    def test_index_retrieval(self, main_col, table_types, engine):
        self._setup(main_col, table_types)
        t = self.t
        t.add_index("a", engine=engine)
        t.add_index(["a", "c"], engine=engine)

        assert len(t.indices) == 2
        assert len(t.indices["a"].columns) == 1
        assert len(t.indices["a", "c"].columns) == 2

        with pytest.raises(IndexError):
            t.indices["b"]

    def test_col_rename(self, main_col, table_types, engine):
        """
        Checks for a previous bug in which copying a Table with different
        column names raised an exception.
        """
        self._setup(main_col, table_types)
        t = self.t
        t.add_index("a", engine=engine)
        t2 = self._table_type(self.t, names=["d", "e", "f"])
        assert len(t2.indices) == 1

    def test_table_loc(self, main_col, table_types, engine):
        self._setup(main_col, table_types)
        t = self.t
        t.add_index("a", engine=engine)
        t.add_index("b", engine=engine)

        t2 = t.loc[self.make_val(3)]  # single label, with primary key 'a'
        assert_col_equal(t2["a"], [3])
        assert isinstance(t2, Row)

        # list search
        t2 = t.loc[[self.make_val(1), self.make_val(4), self.make_val(2)]]
        assert_col_equal(t2["a"], [1, 4, 2])  # same order as input list
        if not isinstance(main_col, Time):
            # ndarray search
            t2 = t.loc[np.array([1, 4, 2])]
            assert_col_equal(t2["a"], [1, 4, 2])
        assert_col_equal(t2["a"], [1, 4, 2])
        t2 = t.loc[self.make_val(3) : self.make_val(5)]  # range search
        assert_col_equal(t2["a"], [3, 4, 5])
        t2 = t.loc.with_index("b")[5.0:7.0]
        assert_col_equal(t2["b"], [5.1, 6.2, 7.0])
        # search by sorted index
        t2 = t.iloc[0:2]  # two smallest rows by column 'a'
        assert_col_equal(t2["a"], [1, 2])
        t2 = t.iloc.with_index("b")[2:]  # exclude two smallest rows in column 'b'
        assert_col_equal(t2["b"], [5.1, 6.2, 7.0])

        for t2 in (t.loc[:], t.iloc[:]):
            assert_col_equal(t2["a"], [1, 2, 3, 4, 5])

    def test_table_loc_indices(self, main_col, table_types, engine):
        self._setup(main_col, table_types)
        t = self.t
        t.add_index("a", engine=engine)
        t.add_index("b", engine=engine)

        t2 = t.loc_indices[self.make_val(3)]  # single label, with primary key 'a'
        assert t2 == 2

        # list search
        t2 = t.loc_indices[[self.make_val(1), self.make_val(4), self.make_val(2)]]
        for i, p in zip(t2, [1, 4, 2]):  # same order as input list
            assert i == p - 1

    def test_invalid_search(self, main_col, table_types, engine):
        # using .loc and .loc_indices with a value not present should raise an exception
        self._setup(main_col, table_types)
        t = self.t

        t.add_index("a")
        with pytest.raises(KeyError):
            t.loc[self.make_val(6)]
        with pytest.raises(KeyError):
            t.loc_indices[self.make_val(6)]

    def test_copy_index_references(self, main_col, table_types, engine):
        # check against a bug in which indices were given an incorrect
        # column reference when copied
        self._setup(main_col, table_types)
        t = self.t
        t.add_index("a")
        t.add_index("b")
        t2 = t.copy()
        assert t2.indices["a"].columns[0] is t2["a"]
        assert t2.indices["b"].columns[0] is t2["b"]

    def test_unique_index(self, main_col, table_types, engine):
        self._setup(main_col, table_types)
        t = self.t
        t.add_index("a", engine=engine, unique=True)
        assert np.all(t.indices["a"].sorted_data() == [0, 1, 2, 3, 4])

        if self.mutable:
            with pytest.raises(ValueError):
                t.add_row((5.0, "9", 5))

    def test_copy_indexed_table(self, table_types):
        self._setup(_col, table_types)
        t = self.t
        t.add_index("a")
        t.add_index(["a", "b"])
        for tp in (self._table_type(t), t.copy()):
            assert len(t.indices) == len(tp.indices)
            for index, indexp in zip(t.indices, tp.indices):
                assert np.all(index.data.data == indexp.data.data)
                assert index.data.data.colnames == indexp.data.data.colnames

    def test_updating_row_byindex(self, main_col, table_types, engine):
        self._setup(main_col, table_types)
        t = Table(
            [["a", "b", "c", "d"], [2, 3, 4, 5], [3, 4, 5, 6]],
            names=("a", "b", "c"),
            meta={"name": "first table"},
        )

        t.add_index("a", engine=engine)
        t.add_index("b", engine=engine)

        t.loc["c"] = ["g", 40, 50]  # single label, with primary key 'a'
        t2 = t[2]
        assert list(t2) == ["g", 40, 50]

        # list search
        t.loc[["a", "d", "b"]] = [["a", 20, 30], ["d", 50, 60], ["b", 30, 40]]
        t2 = [["a", 20, 30], ["d", 50, 60], ["b", 30, 40]]
        for i, p in zip(t2, [1, 4, 2]):  # same order as input list
            assert list(t[p - 1]) == i

    def test_invalid_updates(self, main_col, table_types, engine):
        # using .loc and .loc_indices with a value not present should raise an exception
        self._setup(main_col, table_types)
        t = Table(
            [[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]],
            names=("a", "b", "c"),
            meta={"name": "first table"},
        )

        t.add_index("a")
        with pytest.raises(ValueError):
            t.loc[3] = [[1, 2, 3]]
        with pytest.raises(ValueError):
            t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5, 6]]
        with pytest.raises(ValueError):
            t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5, 6], [2, 3]]
        with pytest.raises(ValueError):
            t.loc[[1, 4, 2]] = [[1, 2, 3], [4, 5], [2, 3]]


def test_get_index():
    a = [1, 4, 5, 2, 7, 4, 45]
    b = [2.0, 5.0, 8.2, 3.7, 4.3, 6.5, 3.3]
    t = Table([a, b], names=("a", "b"), meta={"name": "first table"})
    t.add_index(["a"])
    # Getting the values of index using names
    x1 = get_index(t, names=["a"])
    assert isinstance(x1, SlicedIndex)
    assert len(x1.columns) == 1
    assert len(x1.columns[0]) == 7
    assert x1.columns[0].info.name == "a"
    # Getting the values of index using table_copy
    x2 = get_index(t, table_copy=t[["a"]])
    assert isinstance(x2, SlicedIndex)
    assert len(x2.columns) == 1
    assert len(x2.columns[0]) == 7
    assert x2.columns[0].info.name == "a"

    with pytest.raises(ValueError):
        get_index(t, names=["a"], table_copy=t[["a"]])
    with pytest.raises(ValueError):
        get_index(t, names=None, table_copy=None)


@pytest.mark.parametrize("table_type", [Table, QTable])
def test_index_loc_with_quantity(engine, table_type):
    t = table_type()
    t["a"] = [3, 1, 2] * u.m
    t["b"] = [1, 2, 3]
    t.add_index("a", engine=engine)
    unit = u.m if table_type is QTable else 1
    assert tuple(t.loc[1 * unit]) == (1 * unit, 2)
    assert np.all(t.loc_indices[[1 * unit, 3 * unit]] == [1, 0])
    assert tuple(t.iloc[1]) == (2 * unit, 3)
    for loc in (t.loc, t.iloc):
        t_loc = loc[:]
        assert len(t_loc) == 3
        assert np.all(t_loc["a"] == [1, 2, 3] * unit)
        assert np.all(t_loc["b"] == [2, 3, 1])


def test_index_loc_with_string(engine):
    t = Table()
    t["a"] = ["z", "a", "m"]
    t["b"] = [1, 2, 3]
    t.add_index("a", engine=engine)
    assert tuple(t.loc["a"]) == ("a", 2)
    assert np.all(t.loc_indices[["a", "z"]] == [1, 0])
    assert tuple(t.iloc[1]) == ("m", 3)
    for loc in (t.loc, t.iloc):
        t_loc = loc[:]
        assert len(t_loc) == 3
        assert np.all(t_loc["a"] == ["a", "m", "z"])
        assert np.all(t_loc["b"] == [2, 3, 1])


def test_table_index_time_warning(engine):
    # Make sure that no ERFA warnings are emitted when indexing a table by
    # a Time column with a non-default time scale
    tab = Table()
    tab["a"] = Time([1, 2, 3], format="jyear", scale="tai")
    tab["b"] = [4, 3, 2]
    with warnings.catch_warnings(record=True) as wlist:
        tab.add_index(("a", "b"), engine=engine)
    assert len(wlist) == 0


@pytest.mark.parametrize(
    "col",
    [
        Column(np.arange(50000, 50005)),
        np.arange(50000, 50005) * u.m,
        Time(np.arange(50000, 50005), format="mjd"),
    ],
)
def test_table_index_does_not_propagate_to_column_slices(col):
    # They lost contact to the parent table, so they should also not have
    # information on the indices; this helps prevent large memory usage if,
    # e.g., a large time column is turned into an object array; see gh-10688.
    tab = QTable()
    tab["t"] = col
    tab.add_index("t")
    t = tab["t"]
    assert t.info.indices
    tx = t[1:]
    assert not tx.info.indices
    tabx = tab[1:]
    t = tabx["t"]
    assert t.info.indices


def test_hstack_qtable_table():
    # Check in particular that indices are initialized or copied correctly
    # for a Column that is being converted to a Quantity.
    qtab = QTable([np.arange(5.0) * u.m], names=["s"])
    qtab.add_index("s")
    tab = Table([Column(np.arange(5.0), unit=u.s)], names=["t"])
    qstack = hstack([qtab, tab])
    assert qstack["t"].info.indices == []
    assert qstack.indices == []


def test_index_slice_exception():
    with pytest.raises(TypeError, match="index_slice must be tuple or slice"):
        SlicedIndex(None, None)


@pytest.fixture(scope="module")
def simple_table():
    """Simple table with an index on column 'a'."""
    t = Table()
    t["a"] = [3, 1, 2, 3]
    t["b"] = ["x", "y", "z", "w"]
    t.add_index("a")
    return t


@pytest.mark.parametrize("key", [None, "a"])
@pytest.mark.parametrize(
    "item,length,cls",
    [
        (slice(0, 0), 0, Table),
        ([], 0, Table),
        ([1], 1, Table),
        ([1, 3], 3, Table),
        (np.array([]), 0, Table),
        (np.array([1]), 1, Table),
        (3, 2, Table),  # scalar index with multiple rows
        (1, None, Row),  # scalar index with single row
    ],
)
def test_index_zero_slice_or_sequence_or_scalar(simple_table, key, item, length, cls):
    """Test that indexing with various types gives the expected result.

    Tests fix for #18037.
    """
    loc = simple_table.loc.with_index(key) if key is not None else simple_table.loc
    tloc = loc[item]
    assert isinstance(tloc, cls)
    assert tloc.colnames == simple_table.colnames
    rows = simple_table.loc_indices[item]
    if cls is Table:
        assert len(tloc) == length
        assert len(rows) == length


@pytest.mark.parametrize(
    "method,item",
    [
        ("loc", (2, 5)),
        ("iloc", 1),
        ("loc_indices", (2, 5)),
    ],
)
def test_index_id_item_deprecation_and_with_index(method, item):
    """t.loc/iloc/loc_indices[index_id, item] raises a deprecation warning.

    Also test that these methods give the same result via ``with_index``.
    """
    t = Table()
    t["a"] = [1, 2, 3]
    t["b"] = [4, 5, 6]
    t["c"] = ["x", "y", "z"]
    index_id = ("a", "b")
    t.add_index(index_id)
    prop = getattr(t, method)
    # Test calling like t.loc.with_index("a", "b") and t.loc.with_index(("a", "b")).
    out_call_1 = prop.with_index(*index_id)[item]
    out_call_2 = prop.with_index(index_id)[item]
    with pytest.warns(
        AstropyDeprecationWarning,
        match=r"Calling `Table.loc/iloc/loc_indices\[index_id, item\]`",
    ):
        out_depr = prop[index_id, item]
    assert type(out_depr) is type(out_call_1)
    assert out_depr == out_call_1
    assert type(out_call_1) is type(out_call_2)
    assert out_call_1 == out_call_2


def test_engine_type_error():
    t = Table()
    t["a"] = [1, 2]
    t["b"] = [3, 4]
    with pytest.raises(
        TypeError,
        match=r"engine must be an Engine class or instance, got 'b' instead.",
    ):
        t.add_index("a", "b")  # Easy mistake, too bad engine= is not keyword-only


@pytest.mark.parametrize(
    "masked",
    [pytest.param(False, id="raw-array"), pytest.param(True, id="masked array")],
)
def test_nd_columun_as_index(masked):
    # see https://github.com/astropy/astropy/issues/13292
    # and https://github.com/astropy/astropy/pull/16360
    t = Table()
    data = np.arange(0, 6)
    if masked:
        data = np.ma.masked_inside(data, 2, 4)
    t.add_column(data.reshape(3, -1), name="arr")
    with pytest.raises(
        ValueError, match="Multi-dimensional column 'arr' cannot be used as an index."
    ):
        t.add_index("arr")


@pytest.mark.parametrize("index_first", [True, False])
def test_slice_an_indexed_table(index_first):
    """Test slicing a table that is already indexed.

    Test of fix for https://github.com/astropy/astropy/issues/10732. #10732 is
    the case index_first=True, but also test slicing first (index_first=False)
    since we're at it.
    """
    t = Table()
    t["a"] = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
    t["b"] = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    t["c"] = ["e", "f", "g", "h", "i", "j", "k", "a", "b", "c"]

    if index_first:
        t.add_index("a")
        t.add_index(["b", "c"])
        ts = t[::2]
    else:
        ts = t[::2]
        ts.add_index("a")
        ts.add_index(["b", "c"])

    assert ts.pformat() == [
        " a   b   c ",
        "--- --- ---",
        "  9   0   e",
        "  7   0   g",
        "  5   0   i",
        "  3   1   k",
        "  1   1   b",
    ]

    # Index access works
    assert str(ts.loc[5]).splitlines() == [
        " a   b   c ",
        "--- --- ---",
        "  5   0   i",
    ]

    # Remove row 2 (a==5), check index access still works
    ts.remove_row(2)
    assert ts.pformat() == [
        " a   b   c ",
        "--- --- ---",
        "  9   0   e",
        "  7   0   g",
        "  3   1   k",
        "  1   1   b",
    ]
    assert str(ts.loc[1]).splitlines() == [
        " a   b   c ",
        "--- --- ---",
        "  1   1   b",
    ]

    # Remove row 2 (now a==3), check index access still works
    ts.remove_row(2)
    assert ts.pformat() == [
        " a   b   c ",
        "--- --- ---",
        "  9   0   e",
        "  7   0   g",
        "  1   1   b",
    ]
    assert str(ts.loc[7]).splitlines() == [
        " a   b   c ",
        "--- --- ---",
        "  7   0   g",
    ]

    # Make sure primary index and secondary index look right (with original=True)
    assert str(ts.indices[0]).splitlines() == [
        "<SlicedIndex original=True index=<Index columns=('a',) data=<SortedArray length=3>",
        " a  rows",
        "--- ----",
        "  1    2",
        "  7    1",
        "  9    0>>",
    ]
    assert str(ts.indices[1]).splitlines() == [
        "<SlicedIndex original=True index=<Index columns=('b', 'c') data=<SortedArray length=3>",
        " b   c  rows",
        "--- --- ----",
        "  0   e    0",
        "  0   g    1",
        "  1   b    2>>",
    ]


def test_unique_indices_after_multicol_index_slice():
    """Test that table indices after slicing are correct.

    This tests code in Table._new_from_slice() that ensures uniqueness of
    table index objects when slicing (via slice, ndarray, list etc) a table
    with a multi-column index.
    """
    t = Table()
    t["a"] = [2, 3]
    t["b"] = [3, 5]
    t.add_index(["a", "b"])
    t2 = t[:1]
    assert len(t2.indices) == 1  # without fix would be 2, both with id ("a", "b").
    assert t2.indices[0].id == ("a", "b")
TestIndex
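Editor's note: a minimal, hedged sketch of the Table indexing API the tests above exercise. The three-row table is invented for illustration; add_index and loc are the public astropy.table calls under test.

from astropy.table import Table

t = Table({"a": [3, 1, 2], "b": [10.0, 20.0, 30.0]})
t.add_index("a")     # build a sorted index on column 'a'
row = t.loc[2]       # label-based lookup through the index
subset = t.loc[1:2]  # range search over index values (inclusive)
print(row["b"], len(subset))  # 30.0 2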
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 816374, "end": 816693 }
class ____(
    sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData
):
    """
    See source code for more info.
    """

    __schema__ = graphql_schema
    __field_names__ = ("invitation_email",)
    invitation_email = sgqlc.types.Field(String, graphql_name="invitationEmail")
OrgAddBillingManagerAuditEntry
python
Netflix__metaflow
metaflow/plugins/cards/card_modules/test_cards.py
{ "start": 3096, "end": 4163 }
class ____(MetaflowCard):
    """
    This card takes no components and helps test the `current.card.refresh(data)` interface.
    """

    HTML_TEMPLATE = REFRESHABLE_HTML_TEMPLATE
    RUNTIME_UPDATABLE = True
    ALLOW_USER_COMPONENTS = True

    # Not implementing Reload Policy here since the reload Policy is set to always
    RELOAD_POLICY = MetaflowCard.RELOAD_POLICY_ALWAYS

    type = "test_refresh_card"

    def render(self, task) -> str:
        return self._render_func(task, self.runtime_data)

    def _render_func(self, task, data):
        return self.HTML_TEMPLATE.replace(
            "[REPLACE_CONTENT_HERE]", json.dumps(data["user"])
        ).replace("[PATHSPEC]", task.pathspec)

    def render_runtime(self, task, data):
        return self._render_func(task, data)

    def refresh(self, task, data):
        return data


import hashlib


def _component_values_to_hash(components):
    comma_str = ",".join(["".join(x) for v in components.values() for x in v])
    return hashlib.sha256(comma_str.encode("utf-8")).hexdigest()
TestRefreshCard
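Editor's note: a hedged sketch of how a flow might drive this card at runtime. RefreshFlow and its payload are invented for illustration; the @card decorator and current.card.refresh(data) are the interfaces the card's docstring refers to.

from metaflow import FlowSpec, card, current, step

class RefreshFlow(FlowSpec):
    @card(type="test_refresh_card")
    @step
    def start(self):
        for i in range(3):
            # each call hands fresh data to the card's refresh() hook
            current.card.refresh({"iteration": i})
        self.next(self.end)

    @step
    def end(self):
        pass

if __name__ == "__main__":
    RefreshFlow()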
python
kamyu104__LeetCode-Solutions
Python/minimum-number-of-days-to-eat-n-oranges.py
{ "start": 673, "end": 1338 }
class ____(object):
    def minDays(self, n):
        result = 0
        q, lookup = [n], set([n])
        while q:
            new_q = []
            for i in q:
                if not i:
                    return result
                if i-1 not in lookup:
                    lookup.add(i-1)
                    new_q.append(i-1)
                if i%2 == 0 and i//2 not in lookup:
                    lookup.add(i//2)
                    new_q.append(i//2)
                if i%3 == 0 and i//3 not in lookup:
                    lookup.add(i//3)
                    new_q.append(i//3)
            result += 1
            q = new_q
        return result
Solution2
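Editor's note: the method above runs breadth-first search over orange counts, one BFS level per day, so the first level containing 0 gives the answer. A quick worked check:

s = Solution2()
assert s.minDays(10) == 4  # 10 -> 9 -> 3 -> 1 -> 0
assert s.minDays(6) == 3   # 6 -> 2 -> 1 -> 0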
python
sqlalchemy__sqlalchemy
test/base/test_events.py
{ "start": 946, "end": 8435 }
class ____(TearDownLocalEventsFixture, fixtures.TestBase):
    """Test class- and instance-level event registration."""

    def setup_test(self):
        class TargetEvents(event.Events):
            def event_one(self, x, y):
                pass

            def event_two(self, x):
                pass

            def event_three(self, x):
                pass

        class Target:
            dispatch = event.dispatcher(TargetEvents)

        self.Target = Target

    def test_register_class(self):
        def listen(x, y):
            pass

        event.listen(self.Target, "event_one", listen)

        eq_(len(self.Target().dispatch.event_one), 1)
        eq_(len(self.Target().dispatch.event_two), 0)

    def test_register_instance(self):
        def listen(x, y):
            pass

        t1 = self.Target()
        event.listen(t1, "event_one", listen)

        eq_(len(self.Target().dispatch.event_one), 0)
        eq_(len(t1.dispatch.event_one), 1)
        eq_(len(self.Target().dispatch.event_two), 0)
        eq_(len(t1.dispatch.event_two), 0)

    def test_bool_clslevel(self):
        def listen_one(x, y):
            pass

        event.listen(self.Target, "event_one", listen_one)
        t = self.Target()
        assert t.dispatch.event_one

    def test_register_class_instance(self):
        def listen_one(x, y):
            pass

        def listen_two(x, y):
            pass

        event.listen(self.Target, "event_one", listen_one)
        t1 = self.Target()
        event.listen(t1, "event_one", listen_two)

        eq_(len(self.Target().dispatch.event_one), 1)
        eq_(len(t1.dispatch.event_one), 2)
        eq_(len(self.Target().dispatch.event_two), 0)
        eq_(len(t1.dispatch.event_two), 0)

        def listen_three(x, y):
            pass

        event.listen(self.Target, "event_one", listen_three)
        eq_(len(self.Target().dispatch.event_one), 2)
        eq_(len(t1.dispatch.event_one), 3)

    def test_append_vs_insert_cls(self):
        def listen_one(x, y):
            pass

        def listen_two(x, y):
            pass

        def listen_three(x, y):
            pass

        event.listen(self.Target, "event_one", listen_one)
        event.listen(self.Target, "event_one", listen_two)
        event.listen(self.Target, "event_one", listen_three, insert=True)

        eq_(
            list(self.Target().dispatch.event_one),
            [listen_three, listen_one, listen_two],
        )

    def test_append_vs_insert_instance(self):
        def listen_one(x, y):
            pass

        def listen_two(x, y):
            pass

        def listen_three(x, y):
            pass

        target = self.Target()
        event.listen(target, "event_one", listen_one)
        event.listen(target, "event_one", listen_two)
        event.listen(target, "event_one", listen_three, insert=True)

        eq_(
            list(target.dispatch.event_one),
            [listen_three, listen_one, listen_two],
        )

    def test_decorator(self):
        @event.listens_for(self.Target, "event_one")
        def listen_one(x, y):
            pass

        @event.listens_for(self.Target, "event_two")
        @event.listens_for(self.Target, "event_three")
        def listen_two(x, y):
            pass

        eq_(list(self.Target().dispatch.event_one), [listen_one])
        eq_(list(self.Target().dispatch.event_two), [listen_two])
        eq_(list(self.Target().dispatch.event_three), [listen_two])

    def test_no_instance_level_collections(self):
        @event.listens_for(self.Target, "event_one")
        def listen_one(x, y):
            pass

        t1 = self.Target()
        t2 = self.Target()
        t1.dispatch.event_one(5, 6)
        t2.dispatch.event_one(5, 6)
        is_(
            self.Target.dispatch._empty_listener_reg[self.Target]["event_one"],
            t1.dispatch.event_one,
        )

        @event.listens_for(t1, "event_one")
        def listen_two(x, y):
            pass

        is_not(
            self.Target.dispatch._empty_listener_reg[self.Target]["event_one"],
            t1.dispatch.event_one,
        )
        is_(
            self.Target.dispatch._empty_listener_reg[self.Target]["event_one"],
            t2.dispatch.event_one,
        )

    def test_exec_once(self):
        m1 = Mock()

        event.listen(self.Target, "event_one", m1)

        t1 = self.Target()
        t2 = self.Target()

        t1.dispatch.event_one.for_modify(t1.dispatch).exec_once(5, 6)
        t1.dispatch.event_one.for_modify(t1.dispatch).exec_once(7, 8)
        t2.dispatch.event_one.for_modify(t2.dispatch).exec_once(9, 10)

        eq_(m1.mock_calls, [call(5, 6), call(9, 10)])

    def test_real_name_wrong_dispatch(self):
        m1 = Mock()

        class E1(event.Events):
            @classmethod
            def _accept_with(cls, target, identifier):
                if isinstance(target, T1):
                    return target
                else:
                    m1.yup()
                    return None

            def event_one(self, x, y):
                pass

            def event_two(self, x):
                pass

            def event_three(self, x):
                pass

        class T1:
            dispatch = event.dispatcher(E1)

        class T2:
            pass

        class E2(event.Events):
            _dispatch_target = T2

            def event_four(self, x):
                pass

        with expect_raises_message(
            exc.InvalidRequestError, "No such event 'event_three'"
        ):

            @event.listens_for(E2, "event_three")
            def go(*arg):
                pass

        eq_(m1.mock_calls, [call.yup()])

    def test_exec_once_exception(self):
        m1 = Mock()
        m1.side_effect = ValueError

        event.listen(self.Target, "event_one", m1)

        t1 = self.Target()

        assert_raises(
            ValueError,
            t1.dispatch.event_one.for_modify(t1.dispatch).exec_once,
            5,
            6,
        )

        t1.dispatch.event_one.for_modify(t1.dispatch).exec_once(7, 8)

        eq_(m1.mock_calls, [call(5, 6)])

    def test_exec_once_unless_exception(self):
        m1 = Mock()
        m1.side_effect = ValueError

        event.listen(self.Target, "event_one", m1)

        t1 = self.Target()

        assert_raises(
            ValueError,
            t1.dispatch.event_one.for_modify(
                t1.dispatch
            ).exec_once_unless_exception,
            5,
            6,
        )

        assert_raises(
            ValueError,
            t1.dispatch.event_one.for_modify(
                t1.dispatch
            ).exec_once_unless_exception,
            7,
            8,
        )

        m1.side_effect = None
        t1.dispatch.event_one.for_modify(
            t1.dispatch
        ).exec_once_unless_exception(9, 10)

        t1.dispatch.event_one.for_modify(
            t1.dispatch
        ).exec_once_unless_exception(11, 12)

        eq_(m1.mock_calls, [call(5, 6), call(7, 8), call(9, 10)])

    def test_immutable_methods(self):
        t1 = self.Target()
        for meth in [
            t1.dispatch.event_one.exec_once,
            t1.dispatch.event_one.insert,
            t1.dispatch.event_one.append,
            t1.dispatch.event_one.remove,
            t1.dispatch.event_one.clear,
        ]:
            assert_raises_message(
                NotImplementedError, r"need to call for_modify\(\)", meth
            )
EventsTest
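Editor's note: the same dispatch machinery backs SQLAlchemy's public event API. A minimal, runnable sketch against a real engine (in-memory SQLite, purely illustrative):

from sqlalchemy import create_engine, event

engine = create_engine("sqlite://")

@event.listens_for(engine, "connect")
def on_connect(dbapi_connection, connection_record):
    # runs for every new DBAPI connection the pool creates
    print("new connection:", dbapi_connection)

engine.connect().close()  # triggers the listener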
python
jazzband__django-formtools
formtools/wizard/views.py
{ "start": 23589, "end": 23770 }
class ____(WizardView):
    """
    A WizardView with pre-configured CookieStorage backend.
    """
    storage_name = 'formtools.wizard.storage.cookie.CookieStorage'
CookieWizardView
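Editor's note: a hedged usage sketch; concrete wizards subclass this and implement done(). ContactWizard, done.html, and the form classes named below are placeholders, not part of the source above.

from django.shortcuts import render

class ContactWizard(CookieWizardView):
    def done(self, form_list, **kwargs):
        return render(
            self.request,
            'done.html',
            {'form_data': [form.cleaned_data for form in form_list]},
        )

# urls.py would then route to ContactWizard.as_view([Step1Form, Step2Form])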
python
sphinx-doc__sphinx
sphinx/ext/todo.py
{ "start": 1253, "end": 1881 }
class ____(SphinxAdmonition):
    """A todo entry, displayed (if configured) in the form of an admonition."""

    node_class = todo_node

    def run(self) -> list[Node]:
        if not self.options.get('class'):
            self.options['class'] = ['admonition-todo']

        (todo,) = super().run()
        if not isinstance(todo, todo_node):
            return [todo]

        todo.insert(0, nodes.title(text=_('Todo')))
        todo['docname'] = self.env.current_document.docname
        self.add_name(todo)
        self.set_source_info(todo)
        self.state.document.note_explicit_target(todo)
        return [todo]
Todo
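Editor's note: a directive like this is registered from an extension's setup() hook. A minimal sketch, assuming the conventional Sphinx extension entry point (the metadata dict is the usual return shape):

def setup(app):
    app.add_directive('todo', Todo)
    return {'version': '0.1', 'parallel_read_safe': True}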
python
doocs__leetcode
solution/2700-2799/2782.Number of Unique Categories/Solution.py
{ "start": 135, "end": 622 }
class ____:
    def numberOfCategories(
        self, n: int, categoryHandler: Optional['CategoryHandler']
    ) -> int:
        def find(x: int) -> int:
            if p[x] != x:
                p[x] = find(p[x])
            return p[x]

        p = list(range(n))
        for a in range(n):
            for b in range(a + 1, n):
                if categoryHandler.haveSameCategory(a, b):
                    p[find(a)] = find(b)
        return sum(i == x for i, x in enumerate(p))
Solution
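Editor's note: the solution is union-find with path compression over pairwise category queries; the root count at the end is the number of categories. A self-contained check with a stubbed handler standing in for the judge-provided CategoryHandler:

class FakeHandler:
    # stand-in for the judge's CategoryHandler
    def __init__(self, labels):
        self.labels = labels

    def haveSameCategory(self, a, b):
        return self.labels[a] == self.labels[b]

# three items, two distinct categories -> prints 2
print(Solution().numberOfCategories(3, FakeHandler(["x", "y", "x"])))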
python
scikit-image__scikit-image
benchmarks/benchmark_graph.py
{ "start": 243, "end": 1349 }
class ____:
    """Benchmark for pixel graph routines in scikit-image."""

    def setup(self):
        retina = color.rgb2gray(data.retina())
        t0, _ = filters.threshold_multiotsu(retina, classes=3)
        mask = retina > t0
        vessels = filters.sato(retina, sigmas=range(1, 10)) * mask
        thresholded = filters.apply_hysteresis_threshold(vessels, 0.01, 0.03)
        labeled = ndi.label(thresholded)[0]
        largest_nonzero_label = np.argmax(np.bincount(labeled[labeled > 0]))
        binary = labeled == largest_nonzero_label
        self.skeleton = morphology.skeletonize(binary)

        labeled2 = ndi.label(thresholded[::2, ::2])[0]
        largest_nonzero_label2 = np.argmax(np.bincount(labeled2[labeled2 > 0]))
        binary2 = labeled2 == largest_nonzero_label2
        small_skeleton = morphology.skeletonize(binary2)
        self.g, self.n = graph.pixel_graph(small_skeleton, connectivity=2)

    def time_build_pixel_graph(self):
        graph.pixel_graph(self.skeleton, connectivity=2)

    def time_central_pixel(self):
        graph.central_pixel(self.g, self.n)
GraphSuite
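Editor's note: a tiny self-contained example of the two routines being benchmarked; the 3x3 skeleton is made up. pixel_graph returns a sparse adjacency matrix over the True pixels plus the flattened indices of those pixels, and central_pixel picks the node minimizing total geodesic distance.

import numpy as np
from skimage import graph

skeleton = np.array(
    [[0, 1, 0],
     [0, 1, 0],
     [0, 1, 0]], dtype=bool)

g, nodes = graph.pixel_graph(skeleton, connectivity=2)
pixel, distances = graph.central_pixel(g, nodes)
print(g.shape, nodes, pixel)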
python
pypa__pip
src/pip/_vendor/packaging/utils.py
{ "start": 483, "end": 614 }
class ____(ValueError):
    """
    An invalid distribution name; users should refer to the packaging user guide.
    """
InvalidName
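Editor's note: in packaging, this exception pairs with the name canonicalization helper when validation is requested. A sketch of the catch pattern, assuming the non-vendored packaging distribution and an intentionally bad name:

from packaging.utils import InvalidName, canonicalize_name

try:
    canonicalize_name("not a valid name!", validate=True)
except InvalidName as exc:
    print("rejected:", exc)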
python
Textualize__textual
docs/examples/styles/min_height.py
{ "start": 112, "end": 509 }
class ____(App):
    CSS_PATH = "min_height.tcss"

    def compose(self):
        yield Horizontal(
            Placeholder("min-height: 25%", id="p1"),
            Placeholder("min-height: 75%", id="p2"),
            Placeholder("min-height: 30", id="p3"),
            Placeholder("min-height: 40w", id="p4"),
        )


if __name__ == "__main__":
    app = MinHeightApp()
    app.run()
MinHeightApp
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/control_flow/while_v2_test.py
{ "start": 3186, "end": 64189 }
class ____(test.TestCase, parameterized.TestCase): @test_util.run_deprecated_v1 def testSingleLoopVar(self): x = constant_op.constant(2.) ret = while_loop_v2( lambda v: v < 8., lambda v: v * v, [x], return_same_structure=False) grad = gradients_impl.gradients(ret, [x]) with self.cached_session(): self.assertEqual(self.evaluate(ret), 16.) self.assertSequenceEqual(self.evaluate(grad), [32.]) @test_util.run_deprecated_v1 def testSingleLoopVarBackPropFalse(self): x = constant_op.constant(2.) ret = while_loop_v2( lambda v: v < 8., lambda v: v * v, [x], return_same_structure=False, back_prop=False) grad = gradients_impl.gradients(ret, [x]) self.assertEqual(grad, [None]) with self.cached_session(): self.assertEqual(self.evaluate(ret), 16.) @test_util.run_deprecated_v1 def testCustomGradient(self): x = constant_op.constant(2.) n = constant_op.constant(1., name="const-n") m = variables.Variable(1.0) self.evaluate(variables.global_variables_initializer()) def body_fn(v): # pylint: disable=invalid-name @custom_gradient.custom_gradient def inner_fn(v): # pylint: disable=invalid-name def grad_fn(dy, variables=None): # pylint: disable=invalid-name, unused-argument, redefined-outer-name return dy * 2 * v * n * m, [v * v] return v * v * m, grad_fn return inner_fn(v) ret = while_loop_v2( lambda v: v < 8., body_fn, [x], return_same_structure=False) grad = gradients_impl.gradients(ret, [x]) with self.cached_session(): self.assertEqual(self.evaluate(ret), 16.) self.assertSequenceEqual(self.evaluate(grad), [32.]) @test_util.run_v1_only("b/120545219") def testReturnSameStructureTrue(self): x = constant_op.constant(2.) ret = while_loop_v2( lambda v: v < 8., lambda v: v * v, [x], return_same_structure=True) grad = gradients_impl.gradients(ret, [x]) with self.cached_session() as sess: eval_result = sess.run(ret) self.assertIsInstance(eval_result, list) self.assertLen(eval_result, 1) self.assertEqual(16., eval_result[0]) self.assertSequenceEqual(sess.run(grad), [32.]) def testVerifyInputOutputTypesMatch(self): @def_function.function def BuildWhile(): x = constant_op.constant(1., dtypes.float32) def Body(x): return math_ops.cast(x, dtypes.float16) + 1 while_loop_v2(lambda x: x < 10, Body, [x]) with self.assertRaisesRegex( TypeError, r"Loop var Const:0 enters the loop with type <dtype: 'float32'> " r"but has type <dtype: 'float16'> after 1 iteration."): BuildWhile() @parameterized.parameters(dtypes.float32, dtypes.float64) def testGradientTapeResourceVariable(self, dtype): with context.eager_mode(): v = variables.Variable(1., dtype=dtype) @def_function.function def fnWithLoop(): # pylint: disable=invalid-name with backprop.GradientTape() as tape: _, x = while_loop_v2( lambda i, _: i < 2, lambda i, x: (i + 1, x * v), [0, constant_op.constant(2., dtype=dtype)]) return tape.gradient(x, v) self.assertAllEqual(fnWithLoop(), 4.0) def testDeferredCaptures(self): with context.eager_mode(): c = constant_op.constant(10) @def_function.function def F(): def Body(_): return ops.get_default_graph().capture_call_time_value( lambda: c, tensor_spec.TensorSpec([], dtypes.int32)) x, = while_loop_v2(lambda i: True, Body, [0], maximum_iterations=1) return x self.assertAllEqual(F(), 10) def checkIteratedGradients(self, func): with context.eager_mode(): def _Grad(f): def _GradFunction(primal): with backprop.GradientTape() as tape: tape.watch(primal) primal_out = f(primal) return tape.gradient(primal_out, primal) return _GradFunction f = func one = constant_op.constant(1.) 
for _ in range(3): theoretical, numerical = gradient_checker_v2.compute_gradient( def_function.function(f), [one]) self.assertAllClose(theoretical, numerical, rtol=1e-3) f = _Grad(f) self.assertAllClose(array_ops.reshape(numerical, []), def_function.function(f)(one), rtol=1e-3) def testIteratedGradients(self): def _Func(x): _, z = while_loop_v2( lambda i, _: i < 2, lambda i, y: (i + 1, math_ops.cos(y)), [0, x]) return z self.checkIteratedGradients(_Func) def testIteratedGradientsWithList(self): def _Func(x): results = list_ops.empty_tensor_list( element_shape=[], element_dtype=dtypes.float32) def _LoopBody(i, y, handle): return (i + 1, math_ops.cos(y), list_ops.tensor_list_push_back(handle, y)) _, z, results = while_loop_v2( lambda i, _, h: i < 2, _LoopBody, [0, x, results]) return z + math_ops.reduce_sum(list_ops.tensor_list_stack( results, dtypes.float32)) self.checkIteratedGradients(_Func) def testGradWhileGradWhileWithVariable(self): with context.eager_mode(): v = variables.Variable(1.) @def_function.function def _Func(x): def _Inner(a): with backprop.GradientTape() as tape: tape.watch(a) _, b = while_loop_v2( lambda i, _: i < 2, lambda i, y: (i + 1, math_ops.cos(v + y)), [0, a]) return tape.gradient(b, a) _, z = while_loop_v2( lambda i, _: i < 2, lambda i, y: (i + 1, _Inner(y)), [0, x]) return z with backprop.GradientTape(persistent=True) as tape: x = constant_op.constant(1.) tape.watch(x) y = _Func(x) dx, _ = tape.gradient(y, [x, v]) theoretical, numerical = gradient_checker_v2.compute_gradient( _Func, [x]) self.assertAllClose(numerical, theoretical, rtol=1e-3) self.assertAllClose(array_ops.reshape(numerical, []), dx, rtol=1e-3) def testThreeNestWithLists(self): with context.eager_mode(): def _WrapInWhile(f): def _Wrapped(x): results = list_ops.empty_tensor_list( element_shape=[], element_dtype=dtypes.float32) def _LoopBody(i, y, handle): return (i + 1, f(math_ops.cos(y)), list_ops.tensor_list_push_back(handle, y)) _, z, results = while_loop.while_loop(lambda i, _, h: i < 2, _LoopBody, [0, x, results]) return z + math_ops.reduce_sum(list_ops.tensor_list_stack( results, dtypes.float32)) return _Wrapped f = math_ops.sin target_function = _WrapInWhile(_WrapInWhile(_WrapInWhile(f))) @def_function.function def _TapeFromGraphMode(x): with backprop.GradientTape(persistent=True) as tape: tape.watch(x) y = target_function(x) return tape.gradient(y, x) x = constant_op.constant(1.) dx = _TapeFromGraphMode(x) theoretical, numerical = gradient_checker_v2.compute_gradient( target_function, [x]) self.assertAllClose(numerical, theoretical, rtol=3e-3) self.assertAllClose(array_ops.reshape(numerical, []), dx, rtol=3e-3) def testDeviceLabelsInherited(self): def _LoopBody(i, y): result = math_ops.cos(y) self.assertIn("CPU:10", result.device) with ops.device("CPU:11"): result = array_ops.identity(result) self.assertIn("CPU:11", result.device) return i + 1, result @def_function.function def _FunctionWithWhileLoop(): x = constant_op.constant(1.) with ops.device("CPU:10"): _, z = while_loop_v2( lambda i, _: i < 2, _LoopBody, [0, x]) return z # The test assertion runs at trace time. _FunctionWithWhileLoop.get_concrete_function() def testExternalControlDependencies(self): with ops.Graph().as_default(), self.test_session(): v = variables.Variable(1.) self.evaluate(v.initializer) op = v.assign_add(1.) 
def body_fn(i): # pylint: disable=invalid-name with ops.control_dependencies([op]): return i + 1 loop = while_loop_v2(lambda i: i < 1, body_fn, [0]) loop[0].op.run() self.assertAllEqual(self.evaluate(v), 2.0) @test_util.run_deprecated_v1 def testMultipleLoopVarsBasic(self): x = constant_op.constant(5.) y = constant_op.constant(3.) # x = 5. # y = 3. # while x < 45.: # x = x * y ret = while_loop_v2( lambda v, _: v < 45., lambda v, w: (v * w, w), [x, y], return_same_structure=False) # ret = [x*y^2, y] # Note: This is simply d_ret[0]/d_x since d_ret[1]/d_x is 0. grad = gradients_impl.gradients(ret, [x]) # [2*x*y] with self.cached_session(): self.assertSequenceEqual(self.evaluate(ret), [45., 3.]) self.assertSequenceEqual(self.evaluate(grad), [9.]) @test_util.run_deprecated_v1 def testMultipleLoopNonscalarCond(self): x = constant_op.constant([[5.]]) y = constant_op.constant(3.) # x = 5. # y = 3. # while x < 45.: # x = x * y ret = while_loop_v2( lambda v, _: v < 45., lambda v, w: (v * w, w), [x, y], return_same_structure=False) # ret == [x*y^2, y] # Note: This is simply d_ret[0]/d_x since d_ret[1]/d_x is 0. grad = gradients_impl.gradients(ret, [x]) # [2*x*y] with self.cached_session(): self.assertSequenceEqual(self.evaluate(ret), [45., 3.]) self.assertSequenceEqual(self.evaluate(grad), [9.]) @test_util.run_deprecated_v1 def testMultipleLoopVars(self): x = constant_op.constant(5.) y = constant_op.constant(3.) # x = 5. # y = 3. # while x < 45.: # x = x * y # y = x + y ret = while_loop_v2( lambda v, _: v < 45., lambda v, w: (v * w, v + w), [x, y], return_same_structure=False) # ret = [y*x**2 + x*y**2, x*y + x + y] gradx_0 = gradients_impl.gradients(ret[0], [x]) # [2*x*y + y**2] gradx_1 = gradients_impl.gradients(ret[1], [x]) # [y + 1] gradx_2 = gradients_impl.gradients(ret, [x]) # [2*x*y + y**2 + 2*y + 1] grady_0 = gradients_impl.gradients(ret[0], [y]) # [2*x*y + x**2] grady_1 = gradients_impl.gradients(ret[1], [y]) # [x + 1] grady_2 = gradients_impl.gradients(ret, [y]) # [2*x*y + x**2 + x + 1] with self.cached_session(): self.assertSequenceEqual(self.evaluate(ret), [120., 23.]) self.assertSequenceEqual(self.evaluate(gradx_0), [39.]) self.assertSequenceEqual(self.evaluate(gradx_1), [4.]) self.assertSequenceEqual(self.evaluate(gradx_2), [43.]) self.assertSequenceEqual(self.evaluate(grady_0), [55.]) self.assertSequenceEqual(self.evaluate(grady_1), [6.]) self.assertSequenceEqual(self.evaluate(grady_2), [61.]) @test_util.run_deprecated_v1 def testGradientTape(self): with backprop.GradientTape() as t: x = constant_op.constant(2.) t.watch(x) ret = while_loop_v2( lambda v: v < 4., lambda v: v * v, [x], return_same_structure=False) # x**2 grad = t.gradient(ret, x) with self.cached_session() as sess: self.assertAllEqual(sess.run(grad), 4.0) @test_util.run_deprecated_v1 def testMultipleWhileLoops(self): x = constant_op.constant(2.) ret1 = while_loop_v2( lambda v: v < 4., lambda v: v * v, [x], return_same_structure=False) # x**2 ret2 = while_loop_v2( lambda v: v < 16., lambda v: v * v, [ret1], return_same_structure=False) # x**4 grad = gradients_impl.gradients(ret2, [x]) # 4x**3 grad_grad = gradients_impl.gradients(grad, [x]) # 12x**2 with self.cached_session(): self.assertSequenceEqual(self.evaluate(grad), [32.]) self.assertSequenceEqual(self.evaluate(grad_grad), [48.]) def testMultipleWhileLoopsWithFunc(self): x = constant_op.constant(2.) 
@def_function.function def Fn(): ret1 = while_loop_v2( lambda v: v < 4., lambda v: v * v, [x], return_same_structure=False, name="while_1") # x**2 ret2 = while_loop_v2( lambda v: v < 16., lambda v: v * v, [x], return_same_structure=False, name="while_2") # x**4 return ret1, ret2 concrete_fn = Fn.get_concrete_function() while_1 = concrete_fn.graph.get_operation_by_name("while_1") while_2 = concrete_fn.graph.get_operation_by_name("while_2") self.assertEqual(while_1.type, "StatelessWhile") self.assertEqual(while_2.type, "StatelessWhile") self.assertEmpty(while_1.control_inputs) self.assertEmpty(while_2.control_inputs) def testMultipleWhileLoopsGradStateless(self): @def_function.function def Fn(): x = constant_op.constant(2.) with backprop.GradientTape() as tape: tape.watch(x) ret1 = while_loop_v2( lambda v: v < 4., lambda v: v * v, [x], return_same_structure=False, name="while_1") # x**2 ret2 = while_loop_v2( lambda v: v < 16., lambda v: v * v, [x], return_same_structure=False, name="while_2") # x**4 loss = ret1 + ret2 return tape.gradient(loss, x) graph = Fn.get_concrete_function().graph while_ops = [op for op in graph.get_operations() if "While" in op.type] self.assertAllEqual([op.type for op in while_ops], ["StatelessWhile"] * 4, "Must have exactly 4 StatelessWhile ops.") for op in while_ops: self.assertEmpty(op.control_inputs, "{} should not have any control inputs".format(op.name)) def testMultipleWhileLoopsWithDeps(self): x = variables.Variable(2.) c = constant_op.constant(2.) @def_function.function def Fn(): def Body1(v): x.assign(x) return v * x ret1 = while_loop_v2( lambda v: v < 4., Body1, [c], return_same_structure=False, name="while_1") # 2x def Body2(v): x.assign(x) return v * x * x ret2 = while_loop_v2( lambda v: v < 16., Body2, [c], return_same_structure=False, name="while_2") # 4x return ret1, ret2 concrete_fn = Fn.get_concrete_function() while_1 = concrete_fn.graph.get_operation_by_name("while_1") while_2 = concrete_fn.graph.get_operation_by_name("while_2") self.assertEqual(while_1.type, "While") self.assertEqual(while_2.type, "While") self.assertEmpty(while_1.control_inputs) self.assertLen(while_2.control_inputs, 1) self.assertIs(while_2.control_inputs[0], while_1) def testMultipleWhileLoopsWithVarsDeps(self): x1 = variables.Variable(2.) x2 = variables.Variable(3.) c = constant_op.constant(2.) 
@def_function.function def Fn(): def Body1(v): x1.assign(x1) return v * x1 ret1 = while_loop_v2( lambda v: v < 4., Body1, [c], return_same_structure=False, name="while_1") # 2x def Body2(v): x1.assign(x1) return v * x1 * x1 ret2 = while_loop_v2( lambda v: v < 16., Body2, [c], return_same_structure=False, name="while_2") # 4x def Body3(v): x2.assign(x2) return v * x2 ret3 = while_loop_v2( lambda v: v < 4., Body3, [c], return_same_structure=False, name="while_3") # 3x def Body4(v): x2.assign(x2) return v * x2 * x2 ret4 = while_loop_v2( lambda v: v < 16., Body4, [c], return_same_structure=False, name="while_4") # 9x ret5 = while_loop_v2( lambda v: v < 16., lambda v: v * v, [c], return_same_structure=False, name="while_stateless") # x**2 return ret1, ret2, ret3, ret4, ret5 concrete_fn = Fn.get_concrete_function() while_1 = concrete_fn.graph.get_operation_by_name("while_1") while_2 = concrete_fn.graph.get_operation_by_name("while_2") while_3 = concrete_fn.graph.get_operation_by_name("while_3") while_4 = concrete_fn.graph.get_operation_by_name("while_4") while_stateless = concrete_fn.graph.get_operation_by_name( "while_stateless") self.assertEqual(while_1.type, "While") self.assertEqual(while_2.type, "While") self.assertEqual(while_3.type, "While") self.assertEqual(while_4.type, "While") self.assertEqual(while_stateless.type, "StatelessWhile") self.assertEmpty(while_1.control_inputs) self.assertLen(while_2.control_inputs, 1) self.assertIs(while_2.control_inputs[0], while_1) self.assertEmpty(while_3.control_inputs) self.assertLen(while_4.control_inputs, 1) self.assertIs(while_4.control_inputs[0], while_3) self.assertEmpty(while_stateless.control_inputs) @test_util.run_deprecated_v1 def testDoubleDerivative(self): x = constant_op.constant(2.) ret = while_loop_v2( lambda v: v < 8., lambda v: v**2, [x], return_same_structure=False) # x**4 grad = gradients_impl.gradients(ret, [x]) # 4x**3 grad_grad = gradients_impl.gradients(grad, [x]) # 12x**2 with self.cached_session(): self.assertEqual(self.evaluate(ret), 16.) self.assertSequenceEqual(self.evaluate(grad), [32.]) self.assertSequenceEqual(self.evaluate(grad_grad), [48.]) @test_util.run_v2_only def testMultipleWhileLoopsEager(self): @def_function.function def Func(): x = constant_op.constant(2.) ret1 = while_loop_v2( lambda v: v < 4., lambda v: v * v, [x], return_same_structure=False) # x**2 ret2 = while_loop_v2( lambda v: v < 16., lambda v: v * v, [ret1], return_same_structure=False) # x**4 grad = gradients_impl.gradients(ret2, [x])[0] # 4x**3 grad_grad = gradients_impl.gradients(grad, [x])[0] # 12x**2 return grad, grad_grad grad, grad_grad = Func() self.assertEqual(grad.numpy(), 32.) self.assertEqual(grad_grad.numpy(), 48.) @test_util.run_v2_only def testDoubleDerivativeEager(self): @def_function.function def Func(): x = constant_op.constant(2.) ret = while_loop_v2( lambda v: v < 8., lambda v: v**2, [x], return_same_structure=False) # x**4 grad = gradients_impl.gradients(ret, [x])[0] # 4x**3 grad_grad = gradients_impl.gradients(grad, [x])[0] # 12x**2 return ret, grad, grad_grad ret, grad, grad_grad = Func() self.assertEqual(ret.numpy(), 16.) self.assertEqual(grad.numpy(), 32.) self.assertEqual(grad_grad.numpy(), 48.) def _testPruning(self): x = constant_op.constant(1) tensor_list = list_ops.empty_tensor_list( element_dtype=x.dtype, element_shape=x.shape) def Cond(x, tl): del tl # Unused for Cond. 
return x < 5 def Body(x, tl): return x + 1, list_ops.tensor_list_push_back(tl, x) outputs = while_loop.while_loop(Cond, Body, [x, tensor_list]) train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(outputs[0]) g = GetOptimizedGraph() # TODO(b/136034023): while_v2 adds an extra loop_counter which is not pruned # away, causing an extra Enter node. enter_count = 2 if control_flow_util.ENABLE_CONTROL_FLOW_V2 else 1 self.assertLen([n for n in g.node if n.op == "Enter"], enter_count) # Test that the TensorList is pruned out. self.assertEmpty([ n for n in g.node if n.op == "Enter" and n.attr["T"].type == dtypes.variant.as_datatype_enum ]) self.assertEmpty([n for n in g.node if n.op == "TensorListPushBack"]) stack = list_ops.tensor_list_stack(outputs[1], element_dtype=x.dtype) train_op.append(stack) g = GetOptimizedGraph() # TODO(b/136034023): while_v2 adds an extra loop_counter which is not pruned # away, causing an extra Enter node. enter_count = 3 if control_flow_util.ENABLE_CONTROL_FLOW_V2 else 2 self.assertLen([n for n in g.node if n.op == "Enter"], enter_count) # Test that the TensorList is not pruned out. self.assertNotEmpty([ n for n in g.node if n.op == "Enter" and n.attr["T"].type == dtypes.variant.as_datatype_enum ]) self.assertNotEmpty([n for n in g.node if n.op == "TensorListPushBack"]) @test_util.run_deprecated_v1 def testPruningV1(self): self._testPruning() @test_util.enable_control_flow_v2 @test_util.run_deprecated_v1 def testPruningV2(self): self._testPruning() def _testDoNotAccumulateInvariants(self): push_op = ("TensorListPushBack" if control_flow_v2_toggles.control_flow_v2_enabled() else "StackPushV2") # Tests that loop invariants, i.e., tensors that are "captured" by the # while loop and not passed as loop variables are not accumulated in # gradient computation. v = constant_op.constant(5.0, name="v") r = while_loop.while_loop( lambda _: True, lambda x: v * x, [1.0], maximum_iterations=5) output = gradients_impl.gradients(r, v)[0] train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(output) g = GetOptimizedGraph() # The gradient for v * x requires the value of both v and x. Since v is a # loop invariant it is not accumulated so we have just one accumulator for # x. self.assertLen([n for n in g.node if n.op == push_op], 1) @test_util.run_deprecated_v1 def testDoNotAccumulateInvariantsV1(self): self._testDoNotAccumulateInvariants() @test_util.run_deprecated_v1 @test_util.enable_control_flow_v2 def testDoNotAccumulateInvariantsV2(self): self._testDoNotAccumulateInvariants() @test_util.enable_control_flow_v2 @test_util.run_deprecated_v1 @test_util.enable_output_all_intermediates def testPruningNested(self): assert control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE x = constant_op.constant(0) tensor_list = list_ops.empty_tensor_list( element_dtype=x.dtype, element_shape=x.shape) def Cond(x, tl): del tl # Unused for Cond. return x < 25 def Body(x, tl): def InnerCond(inner_x, unused_outer_x, unused_tl): return inner_x < 5 def InnerBody(inner_x, outer_x, tl): return inner_x + 1, outer_x + 1, list_ops.tensor_list_push_back(tl, x) inner_x = constant_op.constant(0) return while_loop.while_loop(InnerCond, InnerBody, [inner_x, x, tl])[1:] outputs = while_loop.while_loop(Cond, Body, [x, tensor_list]) train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(outputs[0]) g = GetOptimizedGraph() # TODO(b/136034023): while_v2 adds an extra loop_counter which is not pruned # away, causing an extra Enter node. 
# enter_count = 4 if control_flow_util.ENABLE_CONTROL_FLOW_V2 else 2 # self.assertLen([n for n in g.node if n.op == "Enter"], enter_count) # Test that the TensorList is pruned out. self.assertEmpty([ n for n in g.node if n.op == "Enter" and n.attr["T"].type == dtypes.variant.as_datatype_enum ]) self.assertEmpty([n for n in g.node if n.op == "TensorListPushBack"]) self.assertEmpty([n for n in g.node if n.op == "_While"]) stack = list_ops.tensor_list_stack(outputs[1], element_dtype=x.dtype) train_op.append(stack) g = GetOptimizedGraph() # TODO(b/136034023): while_v2 adds an extra loop_counter which is not pruned # away, causing an extra Enter node. # enter_count = 3 if control_flow_util.ENABLE_CONTROL_FLOW_V2 else 2 # self.assertLen([n for n in g.node if n.op == "Enter"], enter_count) # Test that the TensorList is not pruned out. self.assertNotEmpty([ n for n in g.node if n.op == "Enter" and n.attr["T"].type == dtypes.variant.as_datatype_enum ]) self.assertNotEmpty([n for n in g.node if n.op == "TensorListPushBack"]) @test_util.enable_control_flow_v2 @test_util.run_deprecated_v1 @test_util.enable_output_all_intermediates def testPruningNested2(self): assert control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE v = constant_op.constant(5.0, name="v") p = array_ops.placeholder(dtype=dtypes.int32) def MidBodyBuilder(iterations): def MidBody(i, x): r = while_loop.while_loop( lambda *_: True, lambda i, x: (i + 1, math_ops.multiply(v, x, name="my_mul")), (0, x), maximum_iterations=iterations, name="inner") return (i + 1, gradients_impl.gradients(x + r[1], v)[0]) return MidBody def OuterBody(i, x): iterations = array_ops.size(p, name="iterations") return (i + 1, x + while_loop.while_loop( lambda *_: True, MidBodyBuilder(iterations), (0, x), maximum_iterations=iterations, name="mid")[1]) def CreateWhileLoop(): with ops.device("/cpu:0"): r = while_loop.while_loop( lambda *_: True, OuterBody, (0, 1.0), maximum_iterations=5, name="outer") return array_ops.identity(r[1]) output = CreateWhileLoop() train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(output) g = GetOptimizedGraph() self.assertLen([n for n in g.node if n.op == "TensorListPushBack"], 1) @test_util.enable_control_flow_v2 @test_util.run_deprecated_v1 @test_util.enable_output_all_intermediates def testPruningNested3(self): assert control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE v = constant_op.constant(5.0, name="v") def CreateWhileLoop(): r = while_loop.while_loop( lambda _: True, lambda x: math_ops.multiply(v, x, name="my_mul"), [1.0], maximum_iterations=5, name="outer") return array_ops.identity(r) r = CreateWhileLoop() output = gradients_impl.gradients(r, v)[0] train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(output) g = GetOptimizedGraph() self.assertLen([n for n in g.node if n.op == "TensorListPushBack"], 1) def _assertNotAccumulated(self, while_op, index): """Asserts that `while_op` input at `index` is not accumulated.""" body_graph = while_v2._get_graph(while_op, "body", "_body_graph") placeholder = body_graph.inputs[index] self.assertNotIn("TensorListPushBack", [op.type for op in placeholder.consumers()]) @test_util.enable_control_flow_v2 @test_util.run_deprecated_v1 @test_util.enable_output_all_intermediates def testDoNotOutputLoopCounterAsIntermediate(self): assert control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE v = constant_op.constant(5.0, name="v") r = while_loop.while_loop( lambda _: True, lambda x: v * x, [1.0], 
maximum_iterations=5) # Skip over Identity. while_op = r.op.inputs[0].op self._assertNotAccumulated(while_op, 0) @test_util.enable_control_flow_v2 @test_util.run_deprecated_v1 @test_util.enable_output_all_intermediates def testDoNotOutputLoopInvariantAsIntermediate(self): assert control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE def GetInputIndex(op, tensor): for index, inp in enumerate(op.inputs): if inp is tensor: return index v = constant_op.constant(5.0, name="v") r = while_loop.while_loop( lambda _: True, lambda x: v * x, [1.0], maximum_iterations=5) # Skip over Identity. while_op = r.op.inputs[0].op # We can't directly use while_op.inputs.index() because Tensors are not # hashable. index = GetInputIndex(while_op, v) self._assertNotAccumulated(while_op, index) @test_util.run_deprecated_v1 def testCaptureExternalTensorInCond(self): x = constant_op.constant(2.) y = constant_op.constant(1.) ret = while_loop_v2( lambda v: v + y < 9., lambda v: v * 3., [x], return_same_structure=False) grad = gradients_impl.gradients(ret, [x]) with self.cached_session(): self.assertEqual(self.evaluate(ret), 18.) self.assertSequenceEqual(self.evaluate(grad), [9.]) @test_util.run_deprecated_v1 def testCaptureExternalTensorInBody(self): x = constant_op.constant(2.) y = constant_op.constant(3.) ret = while_loop_v2( lambda v: v < 8., lambda v: v * y, [x], return_same_structure=False) grad = gradients_impl.gradients(ret, [x]) with self.cached_session(): self.assertEqual(self.evaluate(ret), 18.) self.assertSequenceEqual(self.evaluate(grad), [9.]) @test_util.run_deprecated_v1 def testLoopWithTensorListPushBack(self): x = constant_op.constant(2.) tensor_list = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=ScalarShape()) def Cond(x, tl): del tl # Unused for Cond. return x < 5. def Body(x, tl): tl = list_ops.tensor_list_push_back(tl, x) tl = list_ops.tensor_list_push_back(tl, constant_op.constant(100.)) return x**2., tl ret = while_loop_v2( Cond, Body, [x, tensor_list], return_same_structure=False) grad = gradients_impl.gradients(ret[0], x) with self.cached_session() as sess: self.assertEqual(sess.run(ret[0]), 16.) self.assertSequenceEqual(self.evaluate(grad), [32.]) @test_util.run_deprecated_v1 def testDuplicateAccumulator(self): x = constant_op.constant(2.) tensor_list = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=ScalarShape()) def Cond(x, tl): del tl # Unused for Cond. return x < 5. def Body(x, tl): # There is an accumulator in the loop already so we should not add # another. tl = list_ops.tensor_list_push_back(tl, x) return x**2., tl ret = while_loop_v2( Cond, Body, [x, tensor_list], return_same_structure=False) for op in ops.get_default_graph().get_operations(): if op.type == "While" or op.type == "StatelessWhile": while_op = op body_graph = while_v2._get_graph(while_op, "body", "_body_graph") x_input_index = [i for i, inp in enumerate(while_op.inputs) if inp == x][0] x_input_t = body_graph.inputs[x_input_index] accumulator_count = len( [c for c in x_input_t.consumers() if c.type == "TensorListPushBack"]) self.assertEqual(accumulator_count, 1) grad = gradients_impl.gradients(ret[0], x) with self.cached_session() as sess: self.assertEqual(sess.run(ret[0]), 16.) 
self.assertSequenceEqual(self.evaluate(grad), [32.]) @parameterized.named_parameters( ("UnknownShape", None), ("PartiallyDefinedShape", [None, 2]), ("FullyDefinedShape", [1, 2]), ) @test_util.run_deprecated_v1 def testAccumulatorElementShape(self, shape): def MatchShape(actual_tensor_shape): # Compare the shapes, treating None dimensions as equal. We do not # directly check actual_tensor_shape and tf.TensorShape(shape) for # equality because tf.Dimension.__eq__ returns None if either dimension is # None. if shape is None: self.assertIsNone(actual_tensor_shape.dims) else: self.assertListEqual(actual_tensor_shape.as_list(), shape) def GetAccumulatorForInputAtIndex(while_op, idx): body_graph = while_v2._get_graph(while_op, "body", "_body_graph") y_input_t = body_graph.inputs[idx] push_back_node = [c for c in y_input_t.consumers() if c.type == "TensorListPushBack"][0] output_idx = body_graph.outputs.index(push_back_node.outputs[0]) return while_op.outputs[output_idx] x = array_ops.placeholder(dtype=dtypes.float32, shape=shape) y = array_ops.placeholder(dtype=dtypes.float32, shape=shape) # Forward pass. ret = while_loop_v2(lambda v, u: v < 8., lambda v, u: (math_ops.pow(v, u), u), [x, y], return_same_structure=True) while_op = ret[0].op.inputs[0].op # Gradient pass. grad = gradients_impl.gradients(ret[0], x) # Note: There is an Identity b/w grad[0] and the While op. grad_while_op = grad[0].op.inputs[0].op # Get the TensorList output of While op containing the accumulated values # of y. x_input_index = [i for i, inp in enumerate(while_op.inputs) if x == inp][0] output = GetAccumulatorForInputAtIndex(while_op, x_input_index) _, val = list_ops.tensor_list_pop_back(output, element_dtype=dtypes.float32) MatchShape(val.shape) # Take second derivative to generate intermediate grad_while_op outputs gradients_impl.gradients(grad, x) # Get the TensorList output of gradient While op containing the accumulated # values of grad_x (note that grad_x is needed by the second derivative). 
# grad_while_op.inputs: grad_output_index = grad_while_op.outputs.index(grad[0].op.inputs[0]) grad_output = GetAccumulatorForInputAtIndex(grad_while_op, grad_output_index) _, val = list_ops.tensor_list_pop_back(grad_output, element_dtype=dtypes.float32) MatchShape(val.shape) def _createWhile(self, name): """Helper function testDefaultName.""" output = while_v2.while_loop( lambda i: i < 3, lambda i: i + 1, [constant_op.constant(0)], return_same_structure=False) while_op = output.op.inputs[0].op self.assertEqual(while_op.type, "StatelessWhile") return while_op def testDefaultName(self): with ops.Graph().as_default(): while_op = self._createWhile(None) self.assertEqual(while_op.name, "while") self.assertRegex(while_op.get_attr("cond").name, r"while_cond_\d*") self.assertRegex(while_op.get_attr("body").name, r"while_body_\d*") with ops.Graph().as_default(): with ops.name_scope("foo"): while1_op = self._createWhile("") self.assertEqual(while1_op.name, "foo/while") self.assertRegex(while1_op.get_attr("cond").name, r"foo_while_cond_\d*") self.assertRegex(while1_op.get_attr("body").name, r"foo_while_body_\d*") while2_op = self._createWhile(None) self.assertEqual(while2_op.name, "foo/while_1") self.assertRegex( while2_op.get_attr("cond").name, r"foo_while_1_cond_\d*") self.assertRegex( while2_op.get_attr("body").name, r"foo_while_1_body_\d*") @test_util.enable_control_flow_v2 @test_util.run_deprecated_v1 def testWhileAndTensorArray(self): param = constant_op.constant(2.0) y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems") # map_fn uses TensorArray internally. r = map_fn.map_fn(lambda x: math_ops.multiply(x, param), y0) grad = gradients_impl.gradients(r, param)[0] self.assertAllClose([2.0, 4.0, 6.0, 8.0, 10.0, 12.0], self.evaluate(r)) self.assertAllClose(21.0, self.evaluate(grad)) @test_util.run_deprecated_v1 def testNestedWhile(self): # Compute sum of geometric progression: n^0 + n^1 + ... + n^m # We compute the pow using a while loop. n = constant_op.constant(3.) m = constant_op.constant(5.) sum_of_powers = constant_op.constant(0.) def Body(i, previous_sum): prod = constant_op.constant(1.) return i - 1., previous_sum + while_loop_v2( lambda c, _: c > 0, lambda c, v: (c - 1., v * n), [i, prod], return_same_structure=False)[1] result = while_loop_v2( lambda i, _: i >= 0, Body, [m, sum_of_powers], return_same_structure=False)[1] grad = gradients_impl.gradients(result, [n]) self.assertEqual(self.evaluate(result), 364.) self.assertSequenceEqual(self.evaluate(grad), [547.]) @test_util.run_deprecated_v1 def testNestedWhileWithLegacyDefun(self): n = constant_op.constant(3.) m = constant_op.constant(5.) sum_of_powers = constant_op.constant(0.) def Body(i, previous_sum): prod = constant_op.constant(1.) def InnerBodyWrapper(c, v): @function.Defun(dtypes.float32, dtypes.float32) def InnerBody(c, v): return c - 1., v * n results = InnerBody(c, v) results[0].set_shape([]) results[1].set_shape([]) return results return i - 1., previous_sum + while_loop_v2( lambda c, _: c > 0, InnerBodyWrapper, [i, prod], return_same_structure=False)[1] result = while_loop_v2( lambda i, _: i >= 0, Body, [m, sum_of_powers], return_same_structure=False)[1] grad = gradients_impl.gradients(result, [n]) self.assertEqual(self.evaluate(result), 364.) self.assertSequenceEqual(self.evaluate(grad), [547.]) @test_util.run_deprecated_v1 def testIdentityNodeInBody(self): def Body(v): v = array_ops.identity(v) v = array_ops.identity(v) return v * v x = constant_op.constant(2.) 
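    # The two Identity ops in Body must be transparent to gradient rewriting:
    # the loop still computes ret = x**4 = 16 with gradient 4 * x**3 = 32.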
ret = while_loop_v2( lambda v: v < 8., Body, [x], return_same_structure=False) grad = gradients_impl.gradients(ret, [x]) self.assertEqual(self.evaluate(ret), 16.) self.assertSequenceEqual(self.evaluate(grad), [32.]) @test_util.run_deprecated_v1 def testForwardPassRewrite(self): x = constant_op.constant(1.0, name="x") output = while_v2.while_loop(lambda x: x < 10.0, lambda x: x * 2.0, [x])[0] while_op = output.op.inputs[0].op self.assertEqual(while_op.type, "StatelessWhile") # outputs = [loop_counter, max_iters, x] self.assertLen(while_op.outputs, 3) gradients_impl.gradients(output, x) # while_op should have been rewritten to output intermediates. # outputs = [loop_counter, max_iters, x, x_accumulator] self.assertLen(while_op.outputs, 4) gradients_impl.gradients(output, x) # Computing the gradient again shouldn't rewrite while_op again. self.assertLen(while_op.outputs, 4) @parameterized.named_parameters( ("RandomUniform", random_ops.random_uniform, [5, 3]), ("RandomNormal", random_ops.random_normal, [5, 3]), ("ParameterizedTruncatedNormal", random_ops.parameterized_truncated_normal, [5, 3]), ("TruncatedNormal", random_ops.truncated_normal, [5, 3]), ("RandomGamma", random_gamma, [5, 3]), ("RandomPoissonV2", random_poisson_v2, [5, 3]), ("RandomGammaWithAlphaBeta", random_gamma_with_alpha_beta, [5, 3, 4, 2]), ("RandomPoissonV2WithLam", random_poisson_v2_with_lam, [5, 3, 2]), ) @test_util.run_deprecated_v1 def testRandomOpsShape(self, random_fn, expected_shape): shape = constant_op.constant([3]) def Body(i, u): shape_extended = array_ops.concat([[5], shape], axis=0) u = random_fn(shape_extended) assert u.shape.as_list() == expected_shape, str(u.shape.as_list()) return i + 1, u _, _ = while_loop_v2( cond=lambda i, _: i < 3, body=Body, loop_vars=[ 0, array_ops.zeros(expected_shape, dtype=dtypes.float32), ]) @test_util.run_deprecated_v1 def testReshapeShape(self): shape = constant_op.constant([3, 4]) def Body(i, u): shape_extended = array_ops.concat([[5], shape], axis=0) u = array_ops.reshape(u, [-1]) assert u.shape.as_list() == [60], str(u.shape.as_list()) u = array_ops.reshape(u, shape_extended) assert u.shape.as_list() == [5, 3, 4], str(u.shape.as_list()) return i + 1, u _, _ = while_loop_v2( cond=lambda i, _: i < 3, body=Body, loop_vars=[ 0, array_ops.zeros([5, 3, 4], dtype=dtypes.float32), ]) @parameterized.named_parameters( ("Zeros", array_ops.zeros), ("Ones", array_ops.ones), ("Fill", fill), ) @test_util.run_deprecated_v1 def testFillOpsShape(self, fill_fn): shape = constant_op.constant([3, 4]) def Body(i, u): shape_extended = array_ops.concat([[5], shape], axis=0) u = fill_fn(shape_extended) assert u.shape.as_list() == [5, 3, 4], str(u.shape.as_list()) return i + 1, u _, _ = while_loop_v2( cond=lambda i, _: i < 3, body=Body, loop_vars=[ 0, array_ops.zeros([5, 3, 4], dtype=dtypes.float32), ]) @test_util.run_deprecated_v1 def testExternalColocationGrad(self): external_t = constant_op.constant(2.) v0 = constant_op.constant(2.) def Body(v): with ops.colocate_with(external_t): return v * v ret = while_loop_v2(lambda v: v < 8., Body, [v0])[0] grad = gradients_impl.gradients(ret, [v0])[0] self.assertAllEqual(ret, 16.) self.assertAllEqual(grad, 32.) @test_util.run_deprecated_v1 def testDoNotAccumulateConstNodes(self): def Body(v): return v * 2.0 v0 = constant_op.constant(2.) ret = while_loop_v2(lambda v: v < 8., Body, [v0])[0] # Gradients computation has the side-effect of updating the forward op # which is what we want to test. 
unused_grad = gradients_impl.gradients(ret, [v0])[0] # ret is separated from the `While` op by an `Identity` so we skip over # that. forward_while_op = ret.op.inputs[0].op body_graph = while_v2._get_graph(forward_while_op, "body", "_body_graph") push_back_nodes = [ o for o in body_graph.get_operations() if o.type == "TensorListPushBack" ] # Gradient of `Mul` requires accumulating both its inputs. But since one # of those is a Const (2.0), we should have just one accumulator. self.assertLen(push_back_nodes, 1) def testDoNotAccumulateForwardTensorsForReductionOps(self): @def_function.function def Fn(): with backprop.GradientTape() as tape: x = constant_op.constant(2.) tape.watch(x) def Body(i, x): forward_graph = ops.get_default_graph() @custom_gradient.custom_gradient def SquaredWithZeroGrad(x): def Grad(unused_g, variables=None): # pylint: disable=redefined-outer-name del variables gradient_graph = ops.get_default_graph() shape = gen_array_ops.shape(x) assert shape.graph is forward_graph rank = gen_array_ops.rank(x) assert rank.graph is forward_graph size = gen_array_ops.size(x) assert size.graph is forward_graph zeros = array_ops.zeros(shape) assert zeros.graph is gradient_graph return zeros return x * 2, Grad return i + 1, SquaredWithZeroGrad(x) _, result = while_loop_v2(lambda i, _: i < 2, Body, [0, x]) grad = tape.gradient(result, x) return grad Fn() def testDoNotAccumulateForwardTensorsForTensorListReductionOps(self): @def_function.function def Fn(): with backprop.GradientTape() as tape: e = constant_op.constant(2.) x = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=e.shape) x = list_ops.tensor_list_push_back(x, e) tape.watch(x) def Body(i, x): forward_graph = ops.get_default_graph() @custom_gradient.custom_gradient def IdentityWithZeroGrad(x): def Grad(unused_g, variables=None): # pylint: disable=redefined-outer-name del variables gradient_graph = ops.get_default_graph() shape = gen_list_ops.tensor_list_element_shape( x, shape_type=dtypes.int32) assert shape.graph is forward_graph size = gen_list_ops.tensor_list_length(x) assert size.graph is forward_graph zeros = gen_list_ops.tensor_list_reserve(shape, size, dtypes.float32) assert zeros.graph is gradient_graph return zeros return x, Grad return i + 1, IdentityWithZeroGrad(x) _, result = while_loop_v2(lambda i, _: i < 2, Body, [0, x]) ones_like = list_ops.tensor_list_from_tensor( array_ops.ones_like( list_ops.tensor_list_stack(result, element_dtype=dtypes.float32)), element_shape=tensor_shape.TensorShape([])) grad = tape.gradient(result, x, output_gradients=[ones_like]) return grad Fn() @test_util.run_v2_only def testInheritParentNameScope(self): @def_function.function def F(): with ops.name_scope("foo"): def Cond(unused_i): with ops.name_scope("cond"): actual_name_scope = ops.get_name_scope() expected_name_scope = "foo/while/cond" assert actual_name_scope == expected_name_scope, ( "%s does not match %s" % (actual_name_scope, expected_name_scope)) return False def Body(i): with ops.name_scope("body"): actual_name_scope = ops.get_name_scope() expected_name_scope = "foo/while/body" assert actual_name_scope == expected_name_scope, ( "%s does not match %s" % (actual_name_scope, expected_name_scope)) return i return while_v2.while_loop(Cond, Body, [0.]) F() @test_util.run_deprecated_v1 # Need to pass RunMetadata. 
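  # Lowering rewrites functional While ops into Switch/Merge graphs; with it
  # disabled, the executed node names recorded in RunMetadata below must not
  # contain "switch".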
def testDisableLowering(self): old = control_flow_util_v2._DISABLE_LOWER_USING_SWITCH_MERGE control_flow_util_v2._DISABLE_LOWER_USING_SWITCH_MERGE = True with self.session() as sess: x = constant_op.constant(2.) ret = while_loop_v2( lambda v: v < 8., lambda v: v * v, [x], return_same_structure=False) opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() self.assertEqual(sess.run(ret, options=opts, run_metadata=run_metadata), 16) for dev_stat in run_metadata.step_stats.dev_stats: for ns in dev_stat.node_stats: self.assertNotIn("switch", ns.node_name) control_flow_util_v2._DISABLE_LOWER_USING_SWITCH_MERGE = old def _runBasicWithConfig(self, config): with ops.device("/cpu:0"): x = constant_op.constant(0) ret, = while_loop_v2(lambda x: x < 1000, lambda x: x + 1, [x]) with self.cached_session(config=config): self.assertEqual(1000, self.evaluate(ret)) @test_util.run_deprecated_v1 def testRunKernelsInline(self): config = config_pb2.ConfigProto() config.inter_op_parallelism_threads = -1 self._runBasicWithConfig(config) @test_util.run_deprecated_v1 def testSingleThreadedExecution(self): config = config_pb2.ConfigProto() config.experimental.executor_type = "SINGLE_THREADED_EXECUTOR" self._runBasicWithConfig(config) def testIsControlFlowGraph(self): x = constant_op.constant(0) @def_function.function def F(c): def Cond(i): self.assertTrue(i.graph.is_control_flow_graph) return i < 2 def Body(i): i = i + 1 self.assertTrue(i.graph.is_control_flow_graph) return i return while_loop_v2(Cond, Body, [c]) ret, = F(x) self.assertEqual(2, self.evaluate(ret)) def testImportFromSerializedWithFunctionInBody(self): serialized = """node { name: "Const" op: "Const" attr { key: "dtype" value { type: DT_FLOAT } } attr { key: "value" value { tensor { dtype: DT_FLOAT tensor_shape { } float_val: 1.0 } } } } node { name: "while/maximum_iterations" op: "Const" attr { key: "dtype" value { type: DT_INT32 } } attr { key: "value" value { tensor { dtype: DT_INT32 tensor_shape { } int_val: -1 } } } } node { name: "while/loop_counter" op: "Const" attr { key: "dtype" value { type: DT_INT32 } } attr { key: "value" value { tensor { dtype: DT_INT32 tensor_shape { } int_val: 0 } } } } node { name: "while" op: "StatelessWhile" input: "while/loop_counter" input: "while/maximum_iterations" input: "Const" attr { key: "T" value { list { type: DT_INT32 type: DT_INT32 type: DT_FLOAT } } } attr { key: "_lower_using_switch_merge" value { b: true } } attr { key: "_num_original_outputs" value { i: 3 } } attr { key: "_read_only_resource_inputs" value { list { } } } attr { key: "body" value { func { name: "while_body_822" } } } attr { key: "cond" value { func { name: "while_cond_821" } } } attr { key: "output_shapes" value { list { shape { } shape { } shape { } } } } attr { key: "parallel_iterations" value { i: 10 } } } node { name: "while/Identity" op: "Identity" input: "while" attr { key: "T" value { type: DT_INT32 } } } node { name: "while/Identity_1" op: "Identity" input: "while:1" attr { key: "T" value { type: DT_INT32 } } } node { name: "while/Identity_2" op: "Identity" input: "while:2" attr { key: "T" value { type: DT_FLOAT } } } library { function { signature { name: "while_body_822" input_arg { name: "while_loop_counter" type: DT_INT32 } input_arg { name: "while_maximum_iterations_0" type: DT_INT32 } input_arg { name: "placeholder" type: DT_FLOAT } output_arg { name: "add" type: DT_INT32 } output_arg { name: "while_maximum_iterations" type: DT_INT32 } output_arg { name: 
"partitionedcall" type: DT_FLOAT } } node_def { name: "PartitionedCall" op: "PartitionedCall" input: "placeholder" attr { key: "Tin" value { list { type: DT_FLOAT } } } attr { key: "Tout" value { list { type: DT_FLOAT } } } attr { key: "_collective_manager_ids" value { list { } } } attr { key: "_read_only_resource_inputs" value { list { } } } attr { key: "config" value { s: "" } } attr { key: "config_proto" value { s: "" } } attr { key: "executor_type" value { s: "" } } attr { key: "f" value { func { name: "__inference_f_841" } } } experimental_debug_info { original_node_names: "PartitionedCall" } } node_def { name: "add/y" op: "Const" attr { key: "dtype" value { type: DT_INT32 } } attr { key: "value" value { tensor { dtype: DT_INT32 tensor_shape { } int_val: 1 } } } experimental_debug_info { original_node_names: "add/y" } } node_def { name: "add_0" op: "AddV2" input: "while_loop_counter" input: "add/y:output:0" attr { key: "T" value { type: DT_INT32 } } experimental_debug_info { original_node_names: "add" } } ret { key: "add" value: "add_0:z:0" } ret { key: "partitionedcall" value: "PartitionedCall:output:0" } ret { key: "while_maximum_iterations" value: "while_maximum_iterations_0" } arg_attr { key: 0 value { attr { key: "_output_shapes" value { list { shape { } } } } } } arg_attr { key: 1 value { attr { key: "_output_shapes" value { list { shape { } } } } } } arg_attr { key: 2 value { attr { key: "_output_shapes" value { list { shape { } } } } } } } function { signature { name: "while_cond_821" input_arg { name: "while_loop_counter" type: DT_INT32 } input_arg { name: "while_maximum_iterations" type: DT_INT32 } input_arg { name: "placeholder" type: DT_FLOAT } output_arg { name: "less" type: DT_BOOL } } node_def { name: "Less/y" op: "Const" attr { key: "dtype" value { type: DT_FLOAT } } attr { key: "value" value { tensor { dtype: DT_FLOAT tensor_shape { } float_val: 5.0 } } } experimental_debug_info { original_node_names: "Less/y" } } node_def { name: "Less" op: "Less" input: "placeholder" input: "Less/y:output:0" attr { key: "T" value { type: DT_FLOAT } } experimental_debug_info { original_node_names: "Less" } } ret { key: "less" value: "Less:z:0" } arg_attr { key: 0 value { attr { key: "_output_shapes" value { list { shape { } } } } } } arg_attr { key: 1 value { attr { key: "_output_shapes" value { list { shape { } } } } } } arg_attr { key: 2 value { attr { key: "_output_shapes" value { list { shape { } } } } } } } function { signature { name: "__inference_f_841" input_arg { name: "mul_placeholder" type: DT_FLOAT } output_arg { name: "identity" type: DT_FLOAT } } node_def { name: "mul/y" op: "Const" attr { key: "dtype" value { type: DT_FLOAT } } attr { key: "value" value { tensor { dtype: DT_FLOAT tensor_shape { } float_val: 2.0 } } } experimental_debug_info { original_node_names: "mul/y" } } node_def { name: "mul" op: "Mul" input: "mul_placeholder" input: "mul/y:output:0" attr { key: "T" value { type: DT_FLOAT } } experimental_debug_info { original_node_names: "mul" } } node_def { name: "Identity" op: "Identity" input: "mul:z:0" attr { key: "T" value { type: DT_FLOAT } } experimental_debug_info { original_node_names: "Identity" } } ret { key: "identity" value: "Identity:output:0" } arg_attr { key: 0 value { attr { key: "_output_shapes" value { list { shape { } } } } } } } } versions { producer: 399 min_consumer: 12 } """ # Code for generating above graph: # # def Body(i): # @tf.function # def f(): # return i * 2 # return f() # tf.while_loop(lambda i: i < 5., Body, [tf.constant(1.)]) 
graph_def = graph_pb2.GraphDef() text_format.Parse(serialized, graph_def) @def_function.function def F(): x, y = importer.import_graph_def( graph_def, return_elements=["Const:0", "while:2"]) grad_out, = gradients_impl.gradients(y, x) return grad_out self.assertAllEqual(F(), 8.0) def testIndexedSlicesInIncomingGrads(self): @def_function.function def F(): x = constant_op.constant([2.]) # Computes x^4 ret = while_loop_v2( lambda _: True, lambda v: v * v, [x], return_same_structure=False, maximum_iterations=2) v = array_ops.gather(ret, [0]) return gradients_impl.gradients(v, [x])[0] # 4*x^3 self.assertAllEqual(self.evaluate(F()), [32.]) def testShapeInvariantsRaggedTensor(self): @def_function.function def TestFn(x): _, ret = while_loop_v2( lambda i, _: i < 1, lambda i, y: (i + 1, array_ops.concat([y, y], axis=0)), [0, x], shape_invariants=[ tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32), ragged_tensor.RaggedTensorSpec(shape=[None, None])], ) return ret x = ragged_factory_ops.constant([[1., 2.], [3.]]) result = TestFn(x) expected_result = [[1., 2.], [3.], [1., 2.], [3.]] self.assertAllEqual(result, expected_result) def ScalarShape(): return ops.convert_to_tensor([], dtype=dtypes.int32) def GetOptimizedGraph(): mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph()) config = config_pb2.ConfigProto() config.graph_options.rewrite_options.CopyFrom( rewriter_config_pb2.RewriterConfig( constant_folding=rewriter_config_pb2.RewriterConfig.OFF, memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)) return tf_optimizer.OptimizeGraph(config, mg) if __name__ == "__main__": test.main()
WhileV2Test
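The while_v2 tests in the source above keep exercising one core pattern: build a v2 while loop, then differentiate its output through gradients_impl. A minimal sketch of that pattern, reusing the squaring example that recurs throughout the tests (illustration only, not an additional test case):

x = constant_op.constant(2.)
ret = while_loop_v2(
    lambda v: v < 8., lambda v: v * v, [x], return_same_structure=False)
grad = gradients_impl.gradients(ret, [x])  # ret = x**4 = 16., grad = [32.]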
python
getsentry__sentry
src/sentry/release_health/metrics_sessions_v2.py
{ "start": 3657, "end": 6998 }
class ____(ABC): def __init__( self, name: str, raw_groupby: Sequence[str], status_filter: StatusFilter, ): self.name = name self._raw_groupby = raw_groupby self._status_filter = status_filter self._hidden_fields: set[MetricField] = set() self.metric_fields = self._get_metric_fields(raw_groupby, status_filter) @abstractmethod def _get_session_status(self, metric_field: MetricField) -> SessionStatus | None: ... @abstractmethod def _get_metric_fields( self, raw_groupby: Sequence[str], status_filter: StatusFilter ) -> Sequence[MetricField]: ... def extract_values( self, input_groups: GroupedData, output_groups: GroupedData, ) -> None: for metric_field in self.metric_fields: session_status = self._get_session_status(metric_field) if metric_field in self._hidden_fields: # We fetched this only to get a consistent sort order # in the original implementation, don't add it to output data continue field_name = ( f"{metric_field.op}({get_public_name_from_mri(metric_field.metric_mri)})" if metric_field.op else get_public_name_from_mri(metric_field.metric_mri) ) for input_group_key, group in input_groups.items(): if session_status and not self._status_filter: self.ensure_status_groups(input_group_key, output_groups) group_key = replace(input_group_key, session_status=session_status) for subgroup in ("totals", "series"): target = output_groups[group_key][subgroup] previous_value = target[self.name] value = group[subgroup][field_name] if isinstance(value, list): value = [ self.accumulate(prev, self.normalize(x)) for prev, x in zip(previous_value, value) ] else: value = self.accumulate(previous_value, self.normalize(value)) target[self.name] = value def ensure_status_groups(self, input_group_key: GroupKey, output_groups: GroupedData) -> None: # To be consistent with original sessions implementation, # always create defaults for all session status groups for session_status in SessionStatus: group_key = replace(input_group_key, session_status=session_status) output_groups[group_key] # creates entry in defaultdict def get_groupby(self) -> Iterable[str]: for groupby in self._raw_groupby: if groupby == "session.status": continue elif groupby == "project": yield "project_id" else: yield groupby def normalize(self, value: Scalar) -> Scalar: return cast(Scalar, finite_or_none(value)) def accumulate(self, old_value: Scalar, new_value: Scalar) -> Scalar: """Combine two numbers for the same target. Default is the new value""" return new_value UNSORTABLE = {SessionStatus.HEALTHY, SessionStatus.ERRORED}
Field
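A concrete subclass of the ABC in the source above only has to implement the two abstract hooks. A minimal sketch with a hypothetical SumField (the MetricField constructor arguments and the session-duration MRI are assumptions for illustration, not taken from the file):

class SumField(Field):  # "Field" is the masked class name given as the target
    def _get_session_status(self, metric_field: MetricField) -> SessionStatus | None:
        return None  # this field is not broken down by session status

    def _get_metric_fields(
        self, raw_groupby: Sequence[str], status_filter: StatusFilter
    ) -> Sequence[MetricField]:
        # Assumed op/MRI pair; any single aggregation the metrics layer
        # understands would serve for illustration.
        return [MetricField(op="sum", metric_mri="d:sessions/duration@second")]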