language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
tensorflow__tensorflow
tensorflow/python/data/experimental/kernel_tests/optimization/seq_interleave_prefetch_test.py
{ "start": 1156, "end": 3785 }
class ____( test_base.DatasetTestBase, parameterized.TestCase ): @combinations.generate( combinations.times( test_base.eager_only_combinations(), combinations.combine(cycle_length=[2, 4]), combinations.combine(block_length=[2, 4]), combinations.combine(other_arguments=[True, False]), ) ) def testOptimizationSeqInterleavePrefetch( self, cycle_length, block_length, other_arguments, ): num_input_elements = 16 var1 = constant_op.constant(9, dtype=dtypes.int64) var2 = constant_op.constant(11, dtype=dtypes.int64) # dataset1: Deterministic parallel interleave dataset. dataset1 = dataset_ops.Dataset.range(num_input_elements) options1 = options_lib.Options() options1.experimental_optimization.apply_default_optimizations = False options1.experimental_optimization.seq_interleave_prefetch = False dataset1 = dataset1.with_options(options1) if other_arguments: dataset1 = dataset1.interleave( (lambda _: dataset_ops.Dataset.range(var1 + var2 + 1)), cycle_length=cycle_length, block_length=block_length, num_parallel_calls=dataset_ops.AUTOTUNE, deterministic=True, ) else: dataset1 = dataset1.interleave( (lambda _: dataset_ops.Dataset.range(num_input_elements)), cycle_length=cycle_length, block_length=block_length, num_parallel_calls=dataset_ops.AUTOTUNE, deterministic=True, ) # dataset2: Deterministic parallel interleave dataset with # `seq_interleave_prefetch` optimization enabled. 
dataset2 = dataset_ops.Dataset.range(num_input_elements) options2 = options_lib.Options() options2.experimental_optimization.apply_default_optimizations = False options2.experimental_optimization.seq_interleave_prefetch = True dataset2 = dataset2.with_options(options2) if other_arguments: dataset2 = dataset2.interleave( (lambda _: dataset_ops.Dataset.range(var1 + var2 + 1)), cycle_length=cycle_length, block_length=block_length, num_parallel_calls=dataset_ops.AUTOTUNE, deterministic=True, ) else: dataset2 = dataset2.interleave( (lambda _: dataset_ops.Dataset.range(num_input_elements)), cycle_length=cycle_length, block_length=block_length, num_parallel_calls=dataset_ops.AUTOTUNE, deterministic=True, ) self.assertDatasetsEqual(dataset1, dataset2) if __name__ == "__main__": test.main()
SeqInterleavePrefetchTest
python
zarr-developers__zarr-python
src/zarr/storage/_memory.py
{ "start": 5502, "end": 7886 }
class ____(MemoryStore): """ Store for GPU memory. Stores every chunk in GPU memory irrespective of the original location. The dictionary of buffers to initialize this memory store with *must* be GPU Buffers. Writing data to this store through ``.set`` will move the buffer to the GPU if necessary. Parameters ---------- store_dict : MutableMapping, optional A mutable mapping with string keys and [zarr.core.buffer.gpu.Buffer][] values. read_only : bool Whether to open the store in read-only mode. """ _store_dict: MutableMapping[str, gpu.Buffer] # type: ignore[assignment] def __init__( self, store_dict: MutableMapping[str, gpu.Buffer] | None = None, *, read_only: bool = False, ) -> None: super().__init__(store_dict=store_dict, read_only=read_only) # type: ignore[arg-type] def __str__(self) -> str: return f"gpumemory://{id(self._store_dict)}" def __repr__(self) -> str: return f"GpuMemoryStore('{self}')" @classmethod def from_dict(cls, store_dict: MutableMapping[str, Buffer]) -> Self: """ Create a GpuMemoryStore from a dictionary of buffers at any location. The dictionary backing the newly created ``GpuMemoryStore`` will not be the same as ``store_dict``. Parameters ---------- store_dict : mapping A mapping of strings keys to arbitrary Buffers. The buffer data will be moved into a [`gpu.Buffer`][zarr.core.buffer.gpu.Buffer]. Returns ------- GpuMemoryStore """ gpu_store_dict = {k: gpu.Buffer.from_buffer(v) for k, v in store_dict.items()} return cls(gpu_store_dict) async def set(self, key: str, value: Buffer, byte_range: tuple[int, int] | None = None) -> None: # docstring inherited self._check_writable() assert isinstance(key, str) if not isinstance(value, Buffer): raise TypeError( f"GpuMemoryStore.set(): `value` must be a Buffer instance. Got an instance of {type(value)} instead." ) # Convert to gpu.Buffer gpu_value = value if isinstance(value, gpu.Buffer) else gpu.Buffer.from_buffer(value) await super().set(key, gpu_value, byte_range=byte_range)
GpuMemoryStore
python
allegroai__clearml
clearml/backend_api/services/v2_23/datasets.py
{ "start": 29512, "end": 30866 }
class ____(NonStrictDataModel): """ :param count: Item name :type count: int :param name: Number of appearances :type name: str """ _schema = { "properties": { "count": {"description": "Item name", "type": ["integer", "null"]}, "name": { "description": "Number of appearances", "type": ["string", "null"], }, }, "type": "object", } def __init__(self, count=None, name=None, **kwargs): super(StatCount, self).__init__(**kwargs) self.count = count self.name = name @schema_property("count") def count(self): return self._property_count @count.setter def count(self, value): if value is None: self._property_count = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "count", six.integer_types) self._property_count = value @schema_property("name") def name(self): return self._property_name @name.setter def name(self, value): if value is None: self._property_name = None return self.assert_isinstance(value, "name", six.string_types) self._property_name = value
StatCount
python
huggingface__transformers
src/transformers/models/lilt/modeling_lilt.py
{ "start": 4963, "end": 7825 }
class ____(nn.Module): def __init__(self, config): super().__init__() # we divide the hidden_size by 6 here as there are 6 different layout embeddings, # namely left_position, upper_position, right_position, lower_position, height, width self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.padding_idx = config.pad_token_id self.box_position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size // config.channel_shrink_ratio, padding_idx=self.padding_idx, ) self.box_linear_embeddings = nn.Linear( in_features=config.hidden_size, out_features=config.hidden_size // config.channel_shrink_ratio ) self.LayerNorm = nn.LayerNorm(config.hidden_size // config.channel_shrink_ratio, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, bbox=None, position_ids=None): try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3]) except IndexError as e: raise IndexError("The `bbox` coordinate values should be within 0-1000 range.") from e h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1]) w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0]) spatial_position_embeddings = torch.cat( [ left_position_embeddings, upper_position_embeddings, right_position_embeddings, lower_position_embeddings, h_position_embeddings, w_position_embeddings, ], dim=-1, ) spatial_position_embeddings = 
self.box_linear_embeddings(spatial_position_embeddings) box_position_embeddings = self.box_position_embeddings(position_ids) spatial_position_embeddings = spatial_position_embeddings + box_position_embeddings spatial_position_embeddings = self.LayerNorm(spatial_position_embeddings) spatial_position_embeddings = self.dropout(spatial_position_embeddings) return spatial_position_embeddings
LiltLayoutEmbeddings
python
django__django
django/db/models/expressions.py
{ "start": 32039, "end": 33261 }
class ____(F): """ An object that contains a reference to an outer query. In this case, the reference to the outer query has been resolved because the inner query has been used as a subquery. """ contains_aggregate = False contains_over_clause = False def as_sql(self, *args, **kwargs): raise ValueError( "This queryset contains a reference to an outer query and may " "only be used in a subquery." ) def resolve_expression(self, *args, **kwargs): col = super().resolve_expression(*args, **kwargs) if col.contains_over_clause: raise NotSupportedError( f"Referencing outer query window expression is not supported: " f"{self.name}." ) # FIXME: Rename possibly_multivalued to multivalued and fix detection # for non-multivalued JOINs (e.g. foreign key fields). This should take # into account only many-to-many and one-to-many relationships. col.possibly_multivalued = LOOKUP_SEP in self.name return col def relabeled_clone(self, relabels): return self def get_group_by_cols(self): return []
ResolvedOuterRef
python
prakhar1989__Algorithms
tests/graph_test.py
{ "start": 123, "end": 3099 }
class ____(unittest.TestCase): def setUp(self): self.gr = graph() self.gr.add_nodes(["a", "b", "c", "d", "e", "f"]) self.gr.add_edge(("a","b")) self.gr.add_edge(("a","f")) self.gr.add_edge(("b","c")) self.gr.add_edge(("c","e")) self.gr.add_edge(("c","d")) self.gr.add_edge(("d","f")) def test_nodes_method(self): self.assertEqual(len(self.gr.nodes()), 6) def test_add_node_method(self): self.gr.add_node("g") self.assertEqual(len(self.gr.nodes()), 7) def test_has_node_method(self): self.assertTrue(self.gr.has_node("a")) def test_neighbors_method(self): self.assertEqual(len(self.gr.neighbors("a")), 2) def test_del_node_method(self): self.gr.del_node("a") self.assertFalse(self.gr.has_node("a")) self.assertEqual(len(self.gr.edges()), 8) def test_has_edge_method(self): self.assertTrue(self.gr.has_edge(("a", "b"))) self.assertFalse(self.gr.has_edge(("a", "d"))) def test_add_duplicate_node_method_throws_exception(self): self.assertRaises(Exception, self.gr.add_node, "a") def test_del_nonexistent_node_throws_exception(self): self.assertRaises(Exception, self.gr.del_node, "z") def test_add_duplicate_edge_throws_exception(self): self.assertRaises(Exception, self.gr.add_edge, ("a", "b")) def test_add_edge_from_non_existing_node(self): self.assertRaises(Exception, self.gr.add_edge, ("b", "z")) def test_adding_self_loop(self): self.gr.add_edge(("a", "a")) self.assertTrue(self.gr.has_edge(("a", "a"))) def test_remove_self_loop(self): self.gr.add_edge(("a", "a")) self.gr.del_edge(("a", "a")) self.assertFalse(self.gr.has_edge(("a", "a"))) def test_edges_method(self): self.assertEqual(len(self.gr.edges()), 2*6) def test_add_edges_method(self): self.gr.add_edges([("a", "c"), ("c", "f"), ("d", "e")]) self.assertTrue(self.gr.has_edge(("a", "c"))) self.assertTrue(self.gr.has_edge(("c", "f"))) self.assertTrue(self.gr.has_edge(("d", "e"))) def test_node_orders_method(self): self.assertEqual(self.gr.node_order("c"), 3) def test_del_edge_method(self): self.gr.del_edge(("a", "f")) 
self.assertFalse(self.gr.has_edge(("a", "f"))) def test_deleting_non_existing_edge_raises_exception(self): self.assertRaises(Exception, self.gr.del_edge, ("a", "z")) def test_get_default_weight(self): self.assertEqual(self.gr.get_edge_weight(("a", "b")), 1) def test_set_weight_on_existing_edge(self): self.gr.set_edge_weight(("a", "b"), 10) self.assertEqual(self.gr.get_edge_weight(("a", "b")), 10) def test_weight_for_nonexisting_edge(self): self.assertRaises(Exception, self.gr.get_edge_weight, ("a", "c")) if __name__ == "__main__": unittest.main()
test_graph
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1012429, "end": 1012943 }
class ____(sgqlc.types.Type): """Autogenerated return type of UnmarkIssueAsDuplicate""" __schema__ = github_schema __field_names__ = ("client_mutation_id", "duplicate") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation.""" duplicate = sgqlc.types.Field("IssueOrPullRequest", graphql_name="duplicate") """The issue or pull request that was marked as a duplicate."""
UnmarkIssueAsDuplicatePayload
python
tiangolo__fastapi
docs_src/custom_request_and_route/tutorial003.py
{ "start": 138, "end": 1042 }
class ____(APIRoute): def get_route_handler(self) -> Callable: original_route_handler = super().get_route_handler() async def custom_route_handler(request: Request) -> Response: before = time.time() response: Response = await original_route_handler(request) duration = time.time() - before response.headers["X-Response-Time"] = str(duration) print(f"route duration: {duration}") print(f"route response: {response}") print(f"route response headers: {response.headers}") return response return custom_route_handler app = FastAPI() router = APIRouter(route_class=TimedRoute) @app.get("/") async def not_timed(): return {"message": "Not timed"} @router.get("/timed") async def timed(): return {"message": "It's the time of my life"} app.include_router(router)
TimedRoute
python
PyCQA__pyflakes
pyflakes/test/test_doctests.py
{ "start": 1410, "end": 12251 }
class ____(TestCase): withDoctest = True def test_scope_class(self): """Check that a doctest is given a DoctestScope.""" checker = self.flakes(""" m = None def doctest_stuff(): ''' >>> d = doctest_stuff() ''' f = m return f """) scopes = checker.deadScopes module_scopes = [ scope for scope in scopes if scope.__class__ is ModuleScope] doctest_scopes = [ scope for scope in scopes if scope.__class__ is DoctestScope] function_scopes = [ scope for scope in scopes if scope.__class__ is FunctionScope] self.assertEqual(len(module_scopes), 1) self.assertEqual(len(doctest_scopes), 1) module_scope = module_scopes[0] doctest_scope = doctest_scopes[0] self.assertIsInstance(doctest_scope, DoctestScope) self.assertIsInstance(doctest_scope, ModuleScope) self.assertNotIsInstance(doctest_scope, FunctionScope) self.assertNotIsInstance(module_scope, DoctestScope) self.assertIn('m', module_scope) self.assertIn('doctest_stuff', module_scope) self.assertIn('d', doctest_scope) self.assertEqual(len(function_scopes), 1) self.assertIn('f', function_scopes[0]) def test_nested_doctest_ignored(self): """Check that nested doctests are ignored.""" checker = self.flakes(""" m = None def doctest_stuff(): ''' >>> def function_in_doctest(): ... \"\"\" ... >>> ignored_undefined_name ... \"\"\" ... df = m ... return df ... 
>>> function_in_doctest() ''' f = m return f """) scopes = checker.deadScopes module_scopes = [ scope for scope in scopes if scope.__class__ is ModuleScope] doctest_scopes = [ scope for scope in scopes if scope.__class__ is DoctestScope] function_scopes = [ scope for scope in scopes if scope.__class__ is FunctionScope] self.assertEqual(len(module_scopes), 1) self.assertEqual(len(doctest_scopes), 1) module_scope = module_scopes[0] doctest_scope = doctest_scopes[0] self.assertIn('m', module_scope) self.assertIn('doctest_stuff', module_scope) self.assertIn('function_in_doctest', doctest_scope) self.assertEqual(len(function_scopes), 2) self.assertIn('f', function_scopes[0]) self.assertIn('df', function_scopes[1]) def test_global_module_scope_pollution(self): """Check that global in doctest does not pollute module scope.""" checker = self.flakes(""" def doctest_stuff(): ''' >>> def function_in_doctest(): ... global m ... m = 50 ... df = 10 ... m = df ... >>> function_in_doctest() ''' f = 10 return f """) scopes = checker.deadScopes module_scopes = [ scope for scope in scopes if scope.__class__ is ModuleScope] doctest_scopes = [ scope for scope in scopes if scope.__class__ is DoctestScope] function_scopes = [ scope for scope in scopes if scope.__class__ is FunctionScope] self.assertEqual(len(module_scopes), 1) self.assertEqual(len(doctest_scopes), 1) module_scope = module_scopes[0] doctest_scope = doctest_scopes[0] self.assertIn('doctest_stuff', module_scope) self.assertIn('function_in_doctest', doctest_scope) self.assertEqual(len(function_scopes), 2) self.assertIn('f', function_scopes[0]) self.assertIn('df', function_scopes[1]) self.assertIn('m', function_scopes[1]) self.assertNotIn('m', module_scope) def test_global_undefined(self): self.flakes(""" global m def doctest_stuff(): ''' >>> m ''' """, m.UndefinedName) def test_nested_class(self): """Doctest within nested class are processed.""" self.flakes(""" class C: class D: ''' >>> m ''' def doctest_stuff(self): ''' >>> 
m ''' return 1 """, m.UndefinedName, m.UndefinedName) def test_ignore_nested_function(self): """Doctest module does not process doctest in nested functions.""" # 'syntax error' would cause a SyntaxError if the doctest was processed. # However doctest does not find doctest in nested functions # (https://bugs.python.org/issue1650090). If nested functions were # processed, this use of m should cause UndefinedName, and the # name inner_function should probably exist in the doctest scope. self.flakes(""" def doctest_stuff(): def inner_function(): ''' >>> syntax error >>> inner_function() 1 >>> m ''' return 1 m = inner_function() return m """) def test_inaccessible_scope_class(self): """Doctest may not access class scope.""" self.flakes(""" class C: def doctest_stuff(self): ''' >>> m ''' return 1 m = 1 """, m.UndefinedName) def test_importBeforeDoctest(self): self.flakes(""" import foo def doctest_stuff(): ''' >>> foo ''' """) @skip("todo") def test_importBeforeAndInDoctest(self): self.flakes(''' import foo def doctest_stuff(): """ >>> import foo >>> foo """ foo ''', m.RedefinedWhileUnused) def test_importInDoctestAndAfter(self): self.flakes(''' def doctest_stuff(): """ >>> import foo >>> foo """ import foo foo() ''') def test_offsetInDoctests(self): exc = self.flakes(''' def doctest_stuff(): """ >>> x # line 5 """ ''', m.UndefinedName).messages[0] self.assertEqual(exc.lineno, 5) self.assertEqual(exc.col, 12) def test_offsetInLambdasInDoctests(self): exc = self.flakes(''' def doctest_stuff(): """ >>> lambda: x # line 5 """ ''', m.UndefinedName).messages[0] self.assertEqual(exc.lineno, 5) self.assertEqual(exc.col, 20) def test_offsetAfterDoctests(self): exc = self.flakes(''' def doctest_stuff(): """ >>> x = 5 """ x ''', m.UndefinedName).messages[0] self.assertEqual(exc.lineno, 8) self.assertEqual(exc.col, 0) def test_syntaxErrorInDoctest(self): exceptions = self.flakes( ''' def doctest_stuff(): """ >>> from # line 4 >>> fortytwo = 42 >>> except Exception: """ ''', 
m.DoctestSyntaxError, m.DoctestSyntaxError, m.DoctestSyntaxError).messages exc = exceptions[0] self.assertEqual(exc.lineno, 4) if not PYPY: self.assertEqual(exc.col, 18) else: self.assertEqual(exc.col, 26) # PyPy error column offset is 0, # for the second and third line of the doctest # i.e. at the beginning of the line exc = exceptions[1] self.assertEqual(exc.lineno, 5) if PYPY: self.assertEqual(exc.col, 13) else: self.assertEqual(exc.col, 16) exc = exceptions[2] self.assertEqual(exc.lineno, 6) self.assertEqual(exc.col, 13) def test_indentationErrorInDoctest(self): exc = self.flakes(''' def doctest_stuff(): """ >>> if True: ... pass """ ''', m.DoctestSyntaxError).messages[0] self.assertEqual(exc.lineno, 5) self.assertEqual(exc.col, 13) def test_offsetWithMultiLineArgs(self): (exc1, exc2) = self.flakes( ''' def doctest_stuff(arg1, arg2, arg3): """ >>> assert >>> this """ ''', m.DoctestSyntaxError, m.UndefinedName).messages self.assertEqual(exc1.lineno, 6) self.assertEqual(exc1.col, 19) self.assertEqual(exc2.lineno, 7) self.assertEqual(exc2.col, 12) def test_doctestCanReferToFunction(self): self.flakes(""" def foo(): ''' >>> foo ''' """) def test_doctestCanReferToClass(self): self.flakes(""" class Foo(): ''' >>> Foo ''' def bar(self): ''' >>> Foo ''' """) def test_noOffsetSyntaxErrorInDoctest(self): exceptions = self.flakes( ''' def buildurl(base, *args, **kwargs): """ >>> buildurl('/blah.php', ('a', '&'), ('b', '=') '/blah.php?a=%26&b=%3D' >>> buildurl('/blah.php', a='&', 'b'='=') '/blah.php?b=%3D&a=%26' """ pass ''', m.DoctestSyntaxError, m.DoctestSyntaxError).messages exc = exceptions[0] self.assertEqual(exc.lineno, 4) exc = exceptions[1] self.assertEqual(exc.lineno, 6) def test_singleUnderscoreInDoctest(self): self.flakes(''' def func(): """A docstring >>> func() 1 >>> _ 1 """ return 1 ''') def test_globalUnderscoreInDoctest(self): self.flakes(""" from gettext import ugettext as _ def doctest_stuff(): ''' >>> pass ''' """, m.UnusedImport)
Test
python
aio-libs__aiohttp
aiohttp/web_runner.py
{ "start": 6258, "end": 9917 }
class ____(ABC, Generic[_Request]): __slots__ = ("_handle_signals", "_kwargs", "_server", "_sites", "_shutdown_timeout") def __init__( self, *, handle_signals: bool = False, shutdown_timeout: float = 60.0, **kwargs: Any, ) -> None: self._handle_signals = handle_signals self._kwargs = kwargs self._server: Server[_Request] | None = None self._sites: list[BaseSite] = [] self._shutdown_timeout = shutdown_timeout @property def server(self) -> Server[_Request] | None: return self._server @property def addresses(self) -> list[Any]: ret: list[Any] = [] for site in self._sites: server = site._server if server is not None: sockets = server.sockets if sockets is not None: for sock in sockets: ret.append(sock.getsockname()) return ret @property def sites(self) -> set[BaseSite]: return set(self._sites) async def setup(self) -> None: loop = asyncio.get_event_loop() if self._handle_signals: try: loop.add_signal_handler(signal.SIGINT, _raise_graceful_exit) loop.add_signal_handler(signal.SIGTERM, _raise_graceful_exit) except NotImplementedError: # add_signal_handler is not implemented on Windows pass self._server = await self._make_server() @abstractmethod async def shutdown(self) -> None: """Call any shutdown hooks to help server close gracefully.""" async def cleanup(self) -> None: # The loop over sites is intentional, an exception on gather() # leaves self._sites in unpredictable state. # The loop guarantees that a site is either deleted on success or # still present on failure for site in list(self._sites): await site.stop() if self._server: # If setup succeeded # Yield to event loop to ensure incoming requests prior to stopping the sites # have all started to be handled before we proceed to close idle connections. 
await asyncio.sleep(0) self._server.pre_shutdown() await self.shutdown() await self._server.shutdown(self._shutdown_timeout) await self._cleanup_server() self._server = None if self._handle_signals: loop = asyncio.get_running_loop() try: loop.remove_signal_handler(signal.SIGINT) loop.remove_signal_handler(signal.SIGTERM) except NotImplementedError: # remove_signal_handler is not implemented on Windows pass @abstractmethod async def _make_server(self) -> Server[_Request]: """Return a new server for the runner to serve requests.""" @abstractmethod async def _cleanup_server(self) -> None: """Run any cleanup steps after the server is shutdown.""" def _reg_site(self, site: BaseSite) -> None: if site in self._sites: raise RuntimeError(f"Site {site} is already registered in runner {self}") self._sites.append(site) def _check_site(self, site: BaseSite) -> None: if site not in self._sites: raise RuntimeError(f"Site {site} is not registered in runner {self}") def _unreg_site(self, site: BaseSite) -> None: if site not in self._sites: raise RuntimeError(f"Site {site} is not registered in runner {self}") self._sites.remove(site)
BaseRunner
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/array_ops/init_ops_test.py
{ "start": 11332, "end": 12283 }
class ____(test.TestCase): @test_util.run_deprecated_v1 def testInitializerIdentical(self): for dtype in [dtypes.float32, dtypes.float64, dtypes.int64]: init1 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype) init2 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype) self.assertTrue(identicaltest(self, init1, init2)) @test_util.run_deprecated_v1 def testInitializerDifferent(self): for dtype in [dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64]: init1 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype) init2 = init_ops.random_uniform_initializer(0, 7, seed=2, dtype=dtype) self.assertFalse(identicaltest(self, init1, init2)) @test_util.run_deprecated_v1 def testDuplicatedInitializer(self): init = init_ops.random_uniform_initializer(0.0, 1.0) self.assertFalse(duplicated_initializer(self, init, 1))
RandomUniformInitializationTest
python
Textualize__textual
src/textual/app.py
{ "start": 8563, "end": 177770 }
class ____(Generic[ReturnType], DOMNode): """The base class for Textual Applications.""" CSS: ClassVar[str] = "" """Inline CSS, useful for quick scripts. This is loaded after CSS_PATH, and therefore takes priority in the event of a specificity clash.""" # Default (the lowest priority) CSS DEFAULT_CSS: ClassVar[str] DEFAULT_CSS = """ App { background: $background; color: $foreground; &:ansi { background: ansi_default; color: ansi_default; .-ansi-scrollbar { scrollbar-background: ansi_default; scrollbar-background-hover: ansi_default; scrollbar-background-active: ansi_default; scrollbar-color: ansi_blue; scrollbar-color-active: ansi_bright_blue; scrollbar-color-hover: ansi_bright_blue; scrollbar-corner-color: ansi_default; } .bindings-table--key { color: ansi_magenta; } .bindings-table--description { color: ansi_default; } .bindings-table--header { color: ansi_default; } .bindings-table--divider { color: transparent; text-style: dim; } } /* When a widget is maximized */ Screen.-maximized-view { layout: vertical !important; hatch: right $panel; overflow-y: auto !important; align: center middle; .-maximized { dock: initial !important; } } /* Fade the header title when app is blurred */ &:blur HeaderTitle { text-opacity: 50%; } } *:disabled:can-focus { opacity: 0.7; } """ MODES: ClassVar[dict[str, str | Callable[[], Screen]]] = {} """Modes associated with the app and their base screens. The base screen is the screen at the bottom of the mode stack. You can think of it as the default screen for that stack. The base screens can be names of screens listed in [SCREENS][textual.app.App.SCREENS], [`Screen`][textual.screen.Screen] instances, or callables that return screens. Example: ```py class HelpScreen(Screen[None]): ... class MainAppScreen(Screen[None]): ... class MyApp(App[None]): MODES = { "default": "main", "help": HelpScreen, } SCREENS = { "main": MainAppScreen, } ... 
``` """ DEFAULT_MODE: ClassVar[str] = "_default" """Name of the default mode.""" SCREENS: ClassVar[dict[str, Callable[[], Screen[Any]]]] = {} """Screens associated with the app for the lifetime of the app.""" AUTO_FOCUS: ClassVar[str | None] = "*" """A selector to determine what to focus automatically when a screen is activated. The widget focused is the first that matches the given [CSS selector](/guide/queries/#query-selectors). Setting to `None` or `""` disables auto focus. """ ALLOW_SELECT: ClassVar[bool] = True """A switch to toggle arbitrary text selection for the app. Note that this doesn't apply to Input and TextArea which have builtin support for selection. """ _BASE_PATH: str | None = None CSS_PATH: ClassVar[CSSPathType | None] = None """File paths to load CSS from.""" TITLE: str | None = None """A class variable to set the *default* title for the application. To update the title while the app is running, you can set the [title][textual.app.App.title] attribute. See also [the `Screen.TITLE` attribute][textual.screen.Screen.TITLE]. """ SUB_TITLE: str | None = None """A class variable to set the default sub-title for the application. To update the sub-title while the app is running, you can set the [sub_title][textual.app.App.sub_title] attribute. See also [the `Screen.SUB_TITLE` attribute][textual.screen.Screen.SUB_TITLE]. """ ENABLE_COMMAND_PALETTE: ClassVar[bool] = True """Should the [command palette][textual.command.CommandPalette] be enabled for the application?""" NOTIFICATION_TIMEOUT: ClassVar[float] = 5 """Default number of seconds to show notifications before removing them.""" COMMANDS: ClassVar[set[type[Provider] | Callable[[], type[Provider]]]] = { get_system_commands_provider } """Command providers used by the [command palette](/guide/command_palette). Should be a set of [command.Provider][textual.command.Provider] classes. 
""" COMMAND_PALETTE_BINDING: ClassVar[str] = "ctrl+p" """The key that launches the command palette (if enabled by [`App.ENABLE_COMMAND_PALETTE`][textual.app.App.ENABLE_COMMAND_PALETTE]).""" COMMAND_PALETTE_DISPLAY: ClassVar[str | None] = None """How the command palette key should be displayed in the footer (or `None` for default).""" ALLOW_IN_MAXIMIZED_VIEW: ClassVar[str] = "Footer" """The default value of [Screen.ALLOW_IN_MAXIMIZED_VIEW][textual.screen.Screen.ALLOW_IN_MAXIMIZED_VIEW].""" CLICK_CHAIN_TIME_THRESHOLD: ClassVar[float] = 0.5 """The maximum number of seconds between clicks to upgrade a single click to a double click, a double click to a triple click, etc.""" BINDINGS: ClassVar[list[BindingType]] = [ Binding( "ctrl+q", "quit", "Quit", tooltip="Quit the app and return to the command prompt.", show=False, priority=True, ), Binding("ctrl+c", "help_quit", show=False, system=True), ] """The default key bindings.""" CLOSE_TIMEOUT: float | None = 5.0 """Timeout waiting for widget's to close, or `None` for no timeout.""" TOOLTIP_DELAY: float = 0.5 """The time in seconds after which a tooltip gets displayed.""" BINDING_GROUP_TITLE: str | None = None """Set to text to show in the key panel.""" ESCAPE_TO_MINIMIZE: ClassVar[bool] = True """Use escape key to minimize widgets (potentially overriding bindings). This is the default value, used if the active screen's `ESCAPE_TO_MINIMIZE` is not changed from `None`. """ INLINE_PADDING: ClassVar[int] = 1 """Number of blank lines above an inline app.""" SUSPENDED_SCREEN_CLASS: ClassVar[str] = "" """Class to apply to suspended screens, or empty string for no class.""" HORIZONTAL_BREAKPOINTS: ClassVar[list[tuple[int, str]]] | None = [] """List of horizontal breakpoints for responsive classes. This allows for styles to be responsive to the dimensions of the terminal. For instance, you might want to show less information, or fewer columns on a narrow displays -- or more information when the terminal is sized wider than usual. 
A breakpoint consists of a tuple containing the minimum width where the class should applied, and the name of the class to set. Note that only one class name is set, and if you should avoid having more than one breakpoint set for the same size. Example: ```python # Up to 80 cells wide, the app has the class "-normal" # 80 - 119 cells wide, the app has the class "-wide" # 120 cells or wider, the app has the class "-very-wide" HORIZONTAL_BREAKPOINTS = [(0, "-normal"), (80, "-wide"), (120, "-very-wide")] ``` """ VERTICAL_BREAKPOINTS: ClassVar[list[tuple[int, str]]] | None = [] """List of vertical breakpoints for responsive classes. Contents are the same as [`HORIZONTAL_BREAKPOINTS`][textual.app.App.HORIZONTAL_BREAKPOINTS], but the integer is compared to the height, rather than the width. """ _PSEUDO_CLASSES: ClassVar[dict[str, Callable[[App[Any]], bool]]] = { "focus": lambda app: app.app_focus, "blur": lambda app: not app.app_focus, "dark": lambda app: app.current_theme.dark, "light": lambda app: not app.current_theme.dark, "inline": lambda app: app.is_inline, "ansi": lambda app: app.ansi_color, "nocolor": lambda app: app.no_color, } title: Reactive[str] = Reactive("", compute=False) """The title of the app, displayed in the header.""" sub_title: Reactive[str] = Reactive("", compute=False) """The app's sub-title, combined with [`title`][textual.app.App.title] in the header.""" app_focus = Reactive(True, compute=False) """Indicates if the app has focus. When run in the terminal, the app always has focus. When run in the web, the app will get focus when the terminal widget has focus. 
""" theme: Reactive[str] = Reactive(constants.DEFAULT_THEME) """The name of the currently active theme.""" ansi_theme_dark = Reactive(MONOKAI, init=False) """Maps ANSI colors to hex colors using a Rich TerminalTheme object while using a dark theme.""" ansi_theme_light = Reactive(ALABASTER, init=False) """Maps ANSI colors to hex colors using a Rich TerminalTheme object while using a light theme.""" ansi_color = Reactive(False) """Allow ANSI colors in UI?""" def __init__( self, driver_class: Type[Driver] | None = None, css_path: CSSPathType | None = None, watch_css: bool = False, ansi_color: bool = False, ): """Create an instance of an app. Args: driver_class: Driver class or `None` to auto-detect. This will be used by some Textual tools. css_path: Path to CSS or `None` to use the `CSS_PATH` class variable. To load multiple CSS files, pass a list of strings or paths which will be loaded in order. watch_css: Reload CSS if the files changed. This is set automatically if you are using `textual run` with the `dev` switch. ansi_color: Allow ANSI colors if `True`, or convert ANSI colors to RGB if `False`. Raises: CssPathError: When the supplied CSS path(s) are an unexpected type. """ self._start_time = perf_counter() super().__init__(classes=self.DEFAULT_CLASSES) self.features: frozenset[FeatureFlag] = parse_features(os.getenv("TEXTUAL", "")) self._registered_themes: dict[str, Theme] = {} """Themes that have been registered with the App using `App.register_theme`. 
This excludes the built-in themes.""" for theme in BUILTIN_THEMES.values(): self.register_theme(theme) ansi_theme = ( self.ansi_theme_dark if self.current_theme.dark else self.ansi_theme_light ) self.set_reactive(App.ansi_color, ansi_color) self._filters: list[LineFilter] = [ ANSIToTruecolor(ansi_theme, enabled=not ansi_color) ] environ = dict(os.environ) self.no_color = environ.pop("NO_COLOR", None) is not None if self.no_color: self._filters.append(NoColor() if self.ansi_color else Monochrome()) for filter_name in constants.FILTERS.split(","): filter = filter_name.lower().strip() if filter == "dim": self._filters.append(DimFilter()) self.console = Console( color_system=constants.COLOR_SYSTEM, file=_NullFile(), markup=True, highlight=False, emoji=False, legacy_windows=False, _environ=environ, force_terminal=True, safe_box=False, soft_wrap=False, ) self._workers = WorkerManager(self) self.error_console = Console(markup=False, highlight=False, stderr=True) self.driver_class = driver_class or self.get_driver_class() self._screen_stacks: dict[str, list[Screen[Any]]] = {self.DEFAULT_MODE: []} """A stack of screens per mode.""" self._current_mode: str = self.DEFAULT_MODE """The current mode the app is in.""" self._sync_available = False self.mouse_over: Widget | None = None """The widget directly under the mouse.""" self.hover_over: Widget | None = None """The first widget with a hover style under the mouse.""" self.mouse_captured: Widget | None = None self._driver: Driver | None = None self._exit_renderables: list[RenderableType] = [] self._action_targets = {"app", "screen", "focused"} self._animator = Animator(self) self._animate = self._animator.bind(self) self.mouse_position = Offset(0, 0) self._mouse_down_widget: Widget | None = None """The widget that was most recently mouse downed (used to create click events).""" self._click_chain_last_offset: Offset | None = None """The last offset at which a Click occurred, in screen-space.""" self._click_chain_last_time: 
float | None = None """The last time at which a Click occurred.""" self._chained_clicks: int = 1 """Counter which tracks the number of clicks received in a row.""" self._previous_cursor_position = Offset(0, 0) """The previous cursor position""" self.cursor_position = Offset(0, 0) """The position of the terminal cursor in screen-space. This can be set by widgets and is useful for controlling the positioning of OS IME and emoji popup menus.""" self._exception: Exception | None = None """The unhandled exception which is leading to the app shutting down, or None if the app is still running with no unhandled exceptions.""" self.title = ( self.TITLE if self.TITLE is not None else f"{self.__class__.__name__}" ) """The title for the application. The initial value for `title` will be set to the `TITLE` class variable if it exists, or the name of the app if it doesn't. Assign a new value to this attribute to change the title. The new value is always converted to string. """ self.sub_title = self.SUB_TITLE if self.SUB_TITLE is not None else "" """The sub-title for the application. The initial value for `sub_title` will be set to the `SUB_TITLE` class variable if it exists, or an empty string if it doesn't. Sub-titles are typically used to show the high-level state of the app, such as the current mode, or path to the file being worked on. Assign a new value to this attribute to change the sub-title. The new value is always converted to string. """ self.use_command_palette: bool = self.ENABLE_COMMAND_PALETTE """A flag to say if the application should use the command palette. If set to `False` any call to [`action_command_palette`][textual.app.App.action_command_palette] will be ignored. 
""" self._logger = Logger(self._log, app=self) self._css_has_errors = False self.theme_variables: dict[str, str] = {} """Variables generated from the current theme.""" # Note that the theme must be set *before* self.get_css_variables() is called # to ensure that the variables are retrieved from the currently active theme. self.stylesheet = Stylesheet(variables=self.get_css_variables()) css_path = css_path or self.CSS_PATH css_paths = [ _make_path_object_relative(css_path, self) for css_path in ( _css_path_type_as_list(css_path) if css_path is not None else [] ) ] self.css_path = css_paths self._registry: WeakSet[DOMNode] = WeakSet() self._keymap: Keymap = {} # Sensitivity on X is double the sensitivity on Y to account for # cells being twice as tall as wide self.scroll_sensitivity_x: float = 4.0 """Number of columns to scroll in the X direction with wheel or trackpad.""" self.scroll_sensitivity_y: float = 2.0 """Number of lines to scroll in the Y direction with wheel or trackpad.""" self._installed_screens: dict[str, Screen | Callable[[], Screen]] = {} self._installed_screens.update(**self.SCREENS) self._modes: dict[str, str | Callable[[], Screen]] = self.MODES.copy() """Contains the working-copy of the `MODES` for each instance.""" self._compose_stacks: list[list[Widget]] = [] self._composed: list[list[Widget]] = [] self._recompose_required = False self.devtools: DevtoolsClient | None = None self._devtools_redirector: StdoutRedirector | None = None if "devtools" in self.features: try: from textual_dev.client import DevtoolsClient from textual_dev.redirect_output import StdoutRedirector except ImportError: # Dev dependencies not installed pass else: self.devtools = DevtoolsClient(constants.DEVTOOLS_HOST) self._devtools_redirector = StdoutRedirector(self.devtools) self._loop: asyncio.AbstractEventLoop | None = None self._return_value: ReturnType | None = None """Internal attribute used to set the return value for the app.""" self._return_code: int | None = None 
"""Internal attribute used to set the return code for the app.""" self._exit = False self._disable_tooltips = False self._disable_notifications = False self.css_monitor = ( FileMonitor(self.css_path, self._on_css_change) if watch_css or self.debug else None ) self._screenshot: str | None = None self._dom_ready = False self._batch_count = 0 self._notifications = Notifications() self._capture_print: WeakKeyDictionary[MessageTarget, tuple[bool, bool]] = ( WeakKeyDictionary() ) """Registry of the MessageTargets which are capturing output at any given time.""" self._capture_stdout = _PrintCapture(self, stderr=False) """File-like object capturing data written to stdout.""" self._capture_stderr = _PrintCapture(self, stderr=True) """File-like object capturing data written to stderr.""" self._original_stdout = sys.__stdout__ """The original stdout stream (before redirection etc).""" self._original_stderr = sys.__stderr__ """The original stderr stream (before redirection etc).""" self.theme_changed_signal: Signal[Theme] = Signal(self, "theme-changed") """Signal that is published when the App's theme is changed. Subscribers will receive the new theme object as an argument to the callback. """ self.app_suspend_signal: Signal[App] = Signal(self, "app-suspend") """The signal that is published when the app is suspended. When [`App.suspend`][textual.app.App.suspend] is called this signal will be [published][textual.signal.Signal.publish]; [subscribe][textual.signal.Signal.subscribe] to this signal to perform work before the suspension takes place. """ self.app_resume_signal: Signal[App] = Signal(self, "app-resume") """The signal that is published when the app is resumed after a suspend. When the app is resumed after a [`App.suspend`][textual.app.App.suspend] call this signal will be [published][textual.signal.Signal.publish]; [subscribe][textual.signal.Signal.subscribe] to this signal to perform work after the app has resumed. 
""" self.set_class(self.current_theme.dark, "-dark-mode", update=False) self.set_class(not self.current_theme.dark, "-light-mode", update=False) self.animation_level: AnimationLevel = constants.TEXTUAL_ANIMATIONS """Determines what type of animations the app will display. See [`textual.constants.TEXTUAL_ANIMATIONS`][textual.constants.TEXTUAL_ANIMATIONS]. """ self._last_focused_on_app_blur: Widget | None = None """The widget that had focus when the last `AppBlur` happened. This will be used to restore correct focus when an `AppFocus` happens. """ self._previous_inline_height: int | None = None """Size of previous inline update.""" self._resize_event: events.Resize | None = None """A pending resize event, sent on idle.""" self._size: Size | None = None self._css_update_count: int = 0 """Incremented when CSS is invalidated.""" self._clipboard: str = "" """Contents of local clipboard.""" self.supports_smooth_scrolling: bool = False """Does the terminal support smooth scrolling?""" self._compose_screen: Screen | None = None """The screen composed by App.compose.""" if self.ENABLE_COMMAND_PALETTE: for _key, binding in self._bindings: if binding.action in {"command_palette", "app.command_palette"}: break else: self._bindings._add_binding( Binding( self.COMMAND_PALETTE_BINDING, "command_palette", "palette", show=False, key_display=self.COMMAND_PALETTE_DISPLAY, priority=True, tooltip="Open the command palette", ) ) def get_line_filters(self) -> Sequence[LineFilter]: """Get currently enabled line filters. Returns: A list of [LineFilter][textual.filters.LineFilter] instances. 
""" return [filter for filter in self._filters if filter.enabled] @property def _is_devtools_connected(self) -> bool: """Is the app connected to the devtools?""" return self.devtools is not None and self.devtools.is_connected @cached_property def _exception_event(self) -> asyncio.Event: """An event that will be set when the first exception is encountered.""" return asyncio.Event() def __init_subclass__(cls, *args, **kwargs) -> None: for variable_name, screen_collection in ( ("SCREENS", cls.SCREENS), ("MODES", cls.MODES), ): for screen_name, screen_object in screen_collection.items(): if not (isinstance(screen_object, str) or callable(screen_object)): if isinstance(screen_object, Screen): raise ValueError( f"{variable_name} should contain a Screen type or callable, not an instance" f" (got instance of {type(screen_object).__name__} for {screen_name!r})" ) raise TypeError( f"expected a callable or string, got {screen_object!r}" ) return super().__init_subclass__(*args, **kwargs) def _thread_init(self): """Initialize threading primitives for the current thread. https://github.com/Textualize/textual/issues/5845 """ self._message_queue self._mounted_event self._exception_event self._thread_id = threading.get_ident() def _get_dom_base(self) -> DOMNode: """When querying from the app, we want to query the default screen.""" return self.default_screen def validate_title(self, title: Any) -> str: """Make sure the title is set to a string.""" return str(title) def validate_sub_title(self, sub_title: Any) -> str: """Make sure the subtitle is set to a string.""" return str(sub_title) @property def default_screen(self) -> Screen: """The default screen instance.""" return self.screen if self._compose_screen is None else self._compose_screen @property def workers(self) -> WorkerManager: """The [worker](/guide/workers/) manager. Returns: An object to manage workers. 
""" return self._workers @property def return_value(self) -> ReturnType | None: """The return value of the app, or `None` if it has not yet been set. The return value is set when calling [exit][textual.app.App.exit]. """ return self._return_value @property def return_code(self) -> int | None: """The return code with which the app exited. Non-zero codes indicate errors. A value of 1 means the app exited with a fatal error. If the app hasn't exited yet, this will be `None`. Example: The return code can be used to exit the process via `sys.exit`. ```py my_app.run() sys.exit(my_app.return_code) ``` """ return self._return_code @property def children(self) -> Sequence["Widget"]: """A view onto the app's immediate children. This attribute exists on all widgets. In the case of the App, it will only ever contain a single child, which will be the currently active screen. Returns: A sequence of widgets. """ try: return ( next( screen for screen in reversed(self._screen_stack) if not isinstance(screen, SystemModalScreen) ), ) except StopIteration: return () @property def clipboard(self) -> str: """The value of the local clipboard. Note, that this only contains text copied in the app, and not text copied from elsewhere in the OS. """ return self._clipboard def format_title(self, title: str, sub_title: str) -> Content: """Format the title for display. Args: title: The title. sub_title: The sub title. Returns: Content instance with title and subtitle. 
""" title_content = Content(title) sub_title_content = Content(sub_title) if sub_title_content: return Content.assemble( title_content, (" — ", "dim"), sub_title_content.stylize("dim"), ) else: return title_content @contextmanager def batch_update(self) -> Generator[None, None, None]: """A context manager to suspend all repaints until the end of the batch.""" self._begin_batch() try: yield finally: self._end_batch() def _begin_batch(self) -> None: """Begin a batch update.""" self._batch_count += 1 def _end_batch(self) -> None: """End a batch update.""" self._batch_count -= 1 assert self._batch_count >= 0, "This won't happen if you use `batch_update`" if not self._batch_count: self.check_idle() def delay_update(self, delay: float = 0.05) -> None: """Delay updates for a short period of time. May be used to mask a brief transition. Consider this method only if you aren't able to use `App.batch_update`. Args: delay: Delay before updating. """ self._begin_batch() def end_batch() -> None: """Re-enable updates, and refresh screen.""" self._end_batch() if not self._batch_count: self.screen.refresh() self.set_timer(delay, end_batch, name="delay_update") @contextmanager def _context(self) -> Generator[None, None, None]: """Context manager to set ContextVars.""" app_reset_token = active_app.set(self) message_pump_reset_token = active_message_pump.set(self) try: yield finally: active_message_pump.reset(message_pump_reset_token) active_app.reset(app_reset_token) def _watch_ansi_color(self, ansi_color: bool) -> None: """Enable or disable the truecolor filter when the reactive changes""" for filter in self._filters: if isinstance(filter, ANSIToTruecolor): filter.enabled = not ansi_color def animate( self, attribute: str, value: float | Animatable, *, final_value: object = ..., duration: float | None = None, speed: float | None = None, delay: float = 0.0, easing: EasingFunction | str = DEFAULT_EASING, on_complete: CallbackType | None = None, level: AnimationLevel = "full", ) -> 
None: """Animate an attribute. See the guide for how to use the [animation](/guide/animation) system. Args: attribute: Name of the attribute to animate. value: The value to animate to. final_value: The final value of the animation. duration: The duration (in seconds) of the animation. speed: The speed of the animation. delay: A delay (in seconds) before the animation starts. easing: An easing method. on_complete: A callable to invoke when the animation is finished. level: Minimum level required for the animation to take place (inclusive). """ self._animate( attribute, value, final_value=final_value, duration=duration, speed=speed, delay=delay, easing=easing, on_complete=on_complete, level=level, ) async def stop_animation(self, attribute: str, complete: bool = True) -> None: """Stop an animation on an attribute. Args: attribute: Name of the attribute whose animation should be stopped. complete: Should the animation be set to its final value? Note: If there is no animation scheduled or running, this is a no-op. """ await self._animator.stop_animation(self, attribute, complete) @property def is_dom_root(self) -> bool: """Is this a root node (i.e. the App)?""" return True @property def is_attached(self) -> bool: """Is this node linked to the app through the DOM?""" return True @property def debug(self) -> bool: """Is debug mode enabled?""" return "debug" in self.features or constants.DEBUG @property def is_headless(self) -> bool: """Is the app running in 'headless' mode? Headless mode is used when running tests with [run_test][textual.app.App.run_test]. 
""" return False if self._driver is None else self._driver.is_headless @property def is_inline(self) -> bool: """Is the app running in 'inline' mode?""" return False if self._driver is None else self._driver.is_inline @property def is_web(self) -> bool: """Is the app running in 'web' mode via a browser?""" return False if self._driver is None else self._driver.is_web @property def screen_stack(self) -> list[Screen[Any]]: """A snapshot of the current screen stack. Returns: A snapshot of the current state of the screen stack. """ return self._screen_stacks[self._current_mode].copy() @property def _screen_stack(self) -> list[Screen[Any]]: """A reference to the current screen stack. Note: Consider using [`screen_stack`][textual.app.App.screen_stack] instead. Returns: A reference to the current screen stack. """ return self._screen_stacks[self._current_mode] @property def current_mode(self) -> str: """The name of the currently active mode.""" return self._current_mode @property def console_options(self) -> ConsoleOptions: """Get options for the Rich console. Returns: Console options (same object returned from `console.options`). """ size = ConsoleDimensions(*self.size) console = self.console return ConsoleOptions( max_height=size.height, size=size, legacy_windows=console.legacy_windows, min_width=1, max_width=size.width, encoding=console.encoding, is_terminal=console.is_terminal, ) def exit( self, result: ReturnType | None = None, return_code: int = 0, message: RenderableType | None = None, ) -> None: """Exit the app, and return the supplied result. Args: result: Return value. return_code: The return code. Use non-zero values for error codes. message: Optional message to display on exit. 
""" self._exit = True self._return_value = result self._return_code = return_code self.post_message(messages.ExitApp()) if message: self._exit_renderables.append(message) @property def focused(self) -> Widget | None: """The widget that is focused on the currently active screen, or `None`. Focused widgets receive keyboard input. Returns: The currently focused widget, or `None` if nothing is focused. """ focused = self.screen.focused if focused is not None and focused.loading: return None return focused @property def active_bindings(self) -> dict[str, ActiveBinding]: """Get currently active bindings. If no widget is focused, then app-level bindings are returned. If a widget is focused, then any bindings present in the active screen and app are merged and returned. This property may be used to inspect current bindings. Returns: A dict that maps keys on to binding information. """ return self.screen.active_bindings def get_system_commands(self, screen: Screen) -> Iterable[SystemCommand]: """A generator of system commands used in the command palette. Args: screen: The screen where the command palette was invoked from. Implement this method in your App subclass if you want to add custom commands. Here is an example: ```python def get_system_commands(self, screen: Screen) -> Iterable[SystemCommand]: yield from super().get_system_commands(screen) yield SystemCommand("Bell", "Ring the bell", self.bell) ``` !!! note Requires that [`SystemCommandsProvider`][textual.system_commands.SystemCommandsProvider] is in `App.COMMANDS` class variable. Yields: [SystemCommand][textual.app.SystemCommand] instances. 
""" if not self.ansi_color: yield SystemCommand( "Theme", "Change the current theme", self.action_change_theme, ) yield SystemCommand( "Quit", "Quit the application as soon as possible", self.action_quit, ) if screen.query("HelpPanel"): yield SystemCommand( "Keys", "Hide the keys and widget help panel", self.action_hide_help_panel, ) else: yield SystemCommand( "Keys", "Show help for the focused widget and a summary of available keys", self.action_show_help_panel, ) if screen.maximized is not None: yield SystemCommand( "Minimize", "Minimize the widget and restore to normal size", screen.action_minimize, ) elif screen.focused is not None and screen.focused.allow_maximize: yield SystemCommand( "Maximize", "Maximize the focused widget", screen.action_maximize ) yield SystemCommand( "Screenshot", "Save an SVG 'screenshot' of the current screen", lambda: self.set_timer(0.1, self.deliver_screenshot), ) def get_default_screen(self) -> Screen: """Get the default screen. This is called when the App is first composed. The returned screen instance will be the first screen on the stack. Implement this method if you would like to use a custom Screen as the default screen. Returns: A screen instance. """ return Screen(id="_default") def compose(self) -> ComposeResult: """Yield child widgets for a container. This method should be implemented in a subclass. """ yield from () def get_theme_variable_defaults(self) -> dict[str, str]: """Get the default values for the `variables` used in a theme. If the currently specified theme doesn't define a value for a variable, the value specified here will be used as a fallback. If a variable is referenced in CSS but does not appear either here or in the theme, the CSS will fail to parse on startup. This method allows applications to define their own variables, beyond those offered by Textual, which can then be overridden by a Theme. Returns: A mapping of variable name (e.g. "my-button-background-color") to value. 
            Values can be any valid CSS value, e.g. "red 50%", "auto 90%",
            "#ff0000", "rgb(255, 0, 0)", etc.
        """
        return {}

    def get_css_variables(self) -> dict[str, str]:
        """Get a mapping of variables used to pre-populate CSS.

        May be implemented in a subclass to add new CSS variables.

        Returns:
            A mapping of variable name to value.
        """
        theme = self.current_theme

        # Build the Textual color system from the theme.
        # This will contain $secondary, $primary, $background, etc.
        variables = theme.to_color_system().generate()
        # Apply the additional variables from the theme
        variables = {**variables, **(theme.variables)}
        theme_variables = self.get_theme_variable_defaults()

        # Theme-derived values win over app-defined defaults (later keys win).
        combined_variables = {**theme_variables, **variables}
        self.theme_variables = combined_variables
        return combined_variables

    def get_theme(self, theme_name: str) -> Theme | None:
        """Get a theme by name.

        Args:
            theme_name: The name of the theme to get. May also be a comma
                separated list of names, to pick the first available theme.

        Returns:
            A Theme instance and None if the theme doesn't exist.
        """
        theme_names = [token.strip() for token in theme_name.split(",")]
        for theme_name in theme_names:
            if theme_name in self.available_themes:
                return self.available_themes[theme_name]
        return None

    def register_theme(self, theme: Theme) -> None:
        """Register a theme with the app.

        If the theme already exists, it will be overridden.

        After registering a theme, you can activate it by setting the
        `App.theme` attribute. To retrieve a registered theme, use the
        `App.get_theme` method.

        Args:
            theme: The theme to register.
        """
        self._registered_themes[theme.name] = theme

    def unregister_theme(self, theme_name: str) -> None:
        """Unregister a theme with the app.

        Args:
            theme_name: The name of the theme to unregister.
        """
        if theme_name in self._registered_themes:
            del self._registered_themes[theme_name]

    @property
    def available_themes(self) -> dict[str, Theme]:
        """All available themes (all built-in themes plus any that have been registered).
        A dictionary mapping theme names to Theme instances.
        """
        return {**self._registered_themes}

    @property
    def current_theme(self) -> Theme:
        """The currently active theme object (resolved from `self.theme`)."""
        theme = self.get_theme(self.theme)
        if theme is None:
            # Fall back to the built-in dark theme if the name can't be resolved.
            theme = self.get_theme("textual-dark")
        assert theme is not None  # validated by _validate_theme
        return theme

    def _validate_theme(self, theme_name: str) -> str:
        """Reactive validator: reject theme names that were never registered."""
        if theme_name not in self.available_themes:
            message = (
                f"Theme {theme_name!r} has not been registered. "
                "Call 'App.register_theme' before setting the 'App.theme' attribute."
            )
            raise InvalidThemeError(message)
        return theme_name

    def _watch_theme(self, theme_name: str) -> None:
        """Apply a theme to the application.

        This method is called when the theme reactive attribute is set.
        """
        theme = self.current_theme
        dark = theme.dark
        self.ansi_color = theme_name == "textual-ansi"
        self.set_class(dark, "-dark-mode", update=False)
        self.set_class(not dark, "-light-mode", update=False)
        self._refresh_truecolor_filter(self.ansi_theme)
        self._invalidate_css()
        # Refresh CSS and notify subscribers on the next message-pump cycle.
        self.call_next(partial(self.refresh_css, animate=False))
        self.call_next(self.theme_changed_signal.publish, theme)

    def _invalidate_css(self) -> None:
        """Invalidate CSS, so it will be refreshed."""
        self._css_update_count += 1

    def watch_ansi_theme_dark(self, theme: TerminalTheme) -> None:
        # Only act if the dark mapping is the one currently in use.
        if self.current_theme.dark:
            self._refresh_truecolor_filter(theme)
            self._invalidate_css()
            self.call_next(self.refresh_css)

    def watch_ansi_theme_light(self, theme: TerminalTheme) -> None:
        # Only act if the light mapping is the one currently in use.
        if not self.current_theme.dark:
            self._refresh_truecolor_filter(theme)
            self._invalidate_css()
            self.call_next(self.refresh_css)

    @property
    def ansi_theme(self) -> TerminalTheme:
        """The ANSI TerminalTheme currently being used.

        Defines how colors defined as ANSI (e.g. `magenta`) inside Rich renderables
        are mapped to hex codes.
""" return ( self.ansi_theme_dark if self.current_theme.dark else self.ansi_theme_light ) def _refresh_truecolor_filter(self, theme: TerminalTheme) -> None: """Update the ANSI to Truecolor filter, if available, with a new theme mapping. Args: theme: The new terminal theme to use for mapping ANSI to truecolor. """ filters = self._filters for index, filter in enumerate(filters): if isinstance(filter, ANSIToTruecolor): filters[index] = ANSIToTruecolor(theme, enabled=not self.ansi_color) return def get_driver_class(self) -> Type[Driver]: """Get a driver class for this platform. This method is called by the constructor, and unlikely to be required when building a Textual app. Returns: A Driver class which manages input and display. """ driver_class: Type[Driver] driver_import = constants.DRIVER if driver_import is not None: # The driver class is set from the environment # Syntax should be foo.bar.baz:MyDriver module_import, _, driver_symbol = driver_import.partition(":") driver_module = importlib.import_module(module_import) driver_class = getattr(driver_module, driver_symbol) if not inspect.isclass(driver_class) or not issubclass( driver_class, Driver ): raise RuntimeError( f"Unable to import {driver_import!r}; {driver_class!r} is not a Driver class " ) return driver_class if WINDOWS: from textual.drivers.windows_driver import WindowsDriver driver_class = WindowsDriver else: from textual.drivers.linux_driver import LinuxDriver driver_class = LinuxDriver return driver_class def __rich_repr__(self) -> rich.repr.Result: yield "title", self.title yield "id", self.id, None if self.name: yield "name", self.name if self.classes: yield "classes", set(self.classes) pseudo_classes = self.pseudo_classes if pseudo_classes: yield "pseudo_classes", set(pseudo_classes) @property def animator(self) -> Animator: """The animator object.""" return self._animator @property def screen(self) -> Screen[object]: """The current active screen. Returns: The currently active (visible) screen. 
        Raises:
            ScreenStackError: If there are no screens on the stack.
        """
        try:
            return self._screen_stack[-1]
        except KeyError:
            # The current mode has no stack registered at all.
            raise UnknownModeError(f"No known mode {self._current_mode!r}") from None
        except IndexError:
            raise ScreenStackError("No screens on stack") from None

    @property
    def _background_screens(self) -> list[Screen]:
        """A list of screens that may be visible due to background opacity (top-most first, not including current screen)."""
        screens: list[Screen] = []
        for screen in reversed(self._screen_stack[:-1]):
            screens.append(screen)
            if screen.styles.background.a == 1:
                # A fully opaque background hides everything beneath it.
                break
        background_screens = screens[::-1]
        return background_screens

    @property
    def size(self) -> Size:
        """The size of the terminal.

        Returns:
            Size of the terminal.
        """
        # Explicit size wins, then the driver's, then the console's best guess.
        if self._size is not None:
            return self._size
        if self._driver is not None and self._driver._size is not None:
            width, height = self._driver._size
        else:
            width, height = self.console.size
        return Size(width, height)

    @property
    def viewport_size(self) -> Size:
        """Get the viewport size (size of the screen)."""
        try:
            return self.screen.size
        except (ScreenStackError, NoScreen):
            # No screen yet; fall back to the terminal size.
            return self.size

    def _get_inline_height(self) -> int:
        """Get the inline height (height when in inline mode).

        Returns:
            Height in lines.
        """
        size = self.size
        return max(screen._get_inline_height(size) for screen in self._screen_stack)

    @property
    def log(self) -> Logger:
        """The textual logger.

        Example:
            ```python
            self.log("Hello, World!")
            self.log(self.tree)
            ```

        Returns:
            A Textual logger.
        """
        return self._logger

    def _log(
        self,
        group: LogGroup,
        verbosity: LogVerbosity,
        _textual_calling_frame: inspect.Traceback,
        *objects: Any,
        **kwargs,
    ) -> None:
        """Write to logs or devtools.

        Positional args will be logged. Keyword args will be prefixed with the key.

        Example:
            ```python
            data = [1,2,3]
            self.log("Hello, World", state=data)
            self.log(self.tree)
            self.log(locals())
            ```

        Args:
            verbosity: Verbosity level 0-3.
""" devtools = self.devtools if devtools is None or not devtools.is_connected: return if verbosity.value > LogVerbosity.NORMAL.value and not devtools.verbose: return try: from textual_dev.client import DevtoolsLog if len(objects) == 1 and not kwargs: devtools.log( DevtoolsLog(objects, caller=_textual_calling_frame), group, verbosity, ) else: output = " ".join(str(arg) for arg in objects) if kwargs: key_values = " ".join( f"{key}={value!r}" for key, value in kwargs.items() ) output = f"{output} {key_values}" if output else key_values devtools.log( DevtoolsLog(output, caller=_textual_calling_frame), group, verbosity, ) except Exception as error: self._handle_exception(error) def get_loading_widget(self) -> Widget: """Get a widget to be used as a loading indicator. Extend this method if you want to display the loading state a little differently. Returns: A widget to display a loading state. """ from textual.widgets import LoadingIndicator return LoadingIndicator() def copy_to_clipboard(self, text: str) -> None: """Copy text to the clipboard. !!! note This does not work on macOS Terminal, but will work on most other terminals. Args: text: Text you wish to copy to the clipboard. """ self._clipboard = text if self._driver is None: return import base64 base64_text = base64.b64encode(text.encode("utf-8")).decode("utf-8") self._driver.write(f"\x1b]52;c;{base64_text}\a") def call_from_thread( self, callback: Callable[..., CallThreadReturnType | Awaitable[CallThreadReturnType]], *args: Any, **kwargs: Any, ) -> CallThreadReturnType: """Run a callable from another thread, and return the result. Like asyncio apps in general, Textual apps are not thread-safe. If you call methods or set attributes on Textual objects from a thread, you may get unpredictable results. This method will ensure that your code runs within the correct context. !!! tip Consider using [post_message][textual.message_pump.MessagePump.post_message] which is also thread-safe. Args: callback: A callable to run. 
            *args: Arguments to the callback.
            **kwargs: Keyword arguments for the callback.

        Raises:
            RuntimeError: If the app isn't running or if this method is called from the same
                thread where the app is running.

        Returns:
            The result of the callback.
        """

        if self._loop is None:
            raise RuntimeError("App is not running")

        if self._thread_id == threading.get_ident():
            # Calling from the app's own thread would block on future.result() below.
            raise RuntimeError(
                "The `call_from_thread` method must run in a different thread from the app"
            )

        callback_with_args = partial(callback, *args, **kwargs)

        async def run_callback() -> CallThreadReturnType:
            """Run the callback, set the result or error on the future."""
            with self._context():
                return await invoke(callback_with_args)

        # Post the message to the main loop
        future: Future[CallThreadReturnType] = asyncio.run_coroutine_threadsafe(
            run_callback(), loop=self._loop
        )
        # Block this (non-app) thread until the coroutine completes.
        result = future.result()
        return result

    def action_change_theme(self) -> None:
        """An [action](/guide/actions) to change the current theme."""
        self.search_themes()

    def action_screenshot(
        self, filename: str | None = None, path: str | None = None
    ) -> None:
        """This [action](/guide/actions) will save an SVG file containing the current contents of the screen.

        Args:
            filename: Filename of screenshot, or None to auto-generate.
            path: Path to directory. Defaults to the user's Downloads directory.
        """
        self.deliver_screenshot(filename, path)

    def export_screenshot(
        self,
        *,
        title: str | None = None,
        simplify: bool = False,
    ) -> str:
        """Export an SVG screenshot of the current screen.

        See also [save_screenshot][textual.app.App.save_screenshot] which writes the screenshot to a file.

        Args:
            title: The title of the exported screenshot or None to use app title.
            simplify: Simplify the segments by combining contiguous segments with the same style.
""" assert self._driver is not None, "App must be running" width, height = self.size console = Console( width=width, height=height, file=io.StringIO(), force_terminal=True, color_system="truecolor", record=True, legacy_windows=False, safe_box=False, ) screen_render = self.screen._compositor.render_update( full=True, screen_stack=self.app._background_screens, simplify=simplify ) console.print(screen_render) return console.export_svg(title=title or self.title) def save_screenshot( self, filename: str | None = None, path: str | None = None, time_format: str | None = None, ) -> str: """Save an SVG screenshot of the current screen. Args: filename: Filename of SVG screenshot, or None to auto-generate a filename with the date and time. path: Path to directory for output. Defaults to current working directory. time_format: Date and time format to use if filename is None. Defaults to a format like ISO 8601 with some reserved characters replaced with underscores. Returns: Filename of screenshot. """ path = path or "./" if not filename: svg_filename = generate_datetime_filename(self.title, ".svg", time_format) else: svg_filename = filename svg_path = os.path.expanduser(os.path.join(path, svg_filename)) screenshot_svg = self.export_screenshot() with open(svg_path, "w", encoding="utf-8") as svg_file: svg_file.write(screenshot_svg) return svg_path def deliver_screenshot( self, filename: str | None = None, path: str | None = None, time_format: str | None = None, ) -> str | None: """Deliver a screenshot of the app. This will save the screenshot when running locally, or serve it when the app is running in a web browser. Args: filename: Filename of SVG screenshot, or None to auto-generate a filename with the date and time. path: Path to directory for output when saving locally (not used when app is running in the browser). Defaults to current working directory. time_format: Date and time format to use if filename is None. 
Defaults to a format like ISO 8601 with some reserved characters replaced with underscores. Returns: The delivery key that uniquely identifies the file delivery. """ if not filename: svg_filename = generate_datetime_filename(self.title, ".svg", time_format) else: svg_filename = filename screenshot_svg = self.export_screenshot() return self.deliver_text( io.StringIO(screenshot_svg), save_directory=path, save_filename=svg_filename, open_method="browser", mime_type="image/svg+xml", name="screenshot", ) def search_commands( self, commands: Sequence[CommandListItem], placeholder: str = "Search for commands…", ) -> AwaitMount: """Show a list of commands in the app. Args: commands: A list of SimpleCommand instances. placeholder: Placeholder text for the search field. Returns: AwaitMount: An awaitable that resolves when the commands are shown. """ return self.push_screen( CommandPalette( providers=[SimpleProvider(self.screen, commands)], placeholder=placeholder, ) ) def search_themes(self) -> None: """Show a fuzzy search command palette containing all registered themes. Selecting a theme in the list will change the app's theme. """ self.push_screen( CommandPalette( providers=[ThemeProvider], placeholder="Search for themes…", ), ) def bind( self, keys: str, action: str, *, description: str = "", show: bool = True, key_display: str | None = None, ) -> None: """Bind a key to an action. !!! warning This method may be private or removed in a future version of Textual. See [dynamic actions](/guide/actions#dynamic-actions) for a more flexible alternative to updating bindings. Args: keys: A comma separated list of keys, i.e. action: Action to bind to. description: Short description of action. show: Show key in UI. key_display: Replacement text for key, or None to use default. """ self._bindings.bind( keys, action, description, show=show, key_display=key_display ) def get_key_display(self, binding: Binding) -> str: """Format a bound key for display in footer / key panel etc. !!! 
note You can implement this in a subclass if you want to change how keys are displayed in your app. Args: binding: A Binding. Returns: A string used to represent the key. """ # Dev has overridden the key display, so use that if binding.key_display: return binding.key_display # Extract modifiers modifiers, key = binding.parse_key() # Format the key (replace unicode names with character) key = format_key(key) # Convert ctrl modifier to caret if "ctrl" in modifiers: modifiers.pop(modifiers.index("ctrl")) key = f"^{key}" # Join everything with + key_tokens = modifiers + [key] return "+".join(key_tokens) async def _press_keys(self, keys: Iterable[str]) -> None: """A task to send key events.""" import unicodedata app = self driver = app._driver assert driver is not None for key in keys: if key.startswith("wait:"): _, wait_ms = key.split(":") await asyncio.sleep(float(wait_ms) / 1000) await app._animator.wait_until_complete() else: if len(key) == 1 and not key.isalnum(): key = _character_to_key(key) original_key = REPLACED_KEYS.get(key, key) char: str | None try: char = unicodedata.lookup(_get_unicode_name_from_key(original_key)) except KeyError: char = key if len(key) == 1 else None key_event = events.Key(key, char) key_event.set_sender(app) driver.send_message(key_event) await wait_for_idle(0) await app._animator.wait_until_complete() await wait_for_idle(0) def _flush(self, stderr: bool = False) -> None: """Called when stdout or stderr is flushed. Args: stderr: True if the print was to stderr, or False for stdout. """ if self._devtools_redirector is not None: self._devtools_redirector.flush() def _print(self, text: str, stderr: bool = False) -> None: """Called with captured print. Dispatches printed content to appropriate destinations: devtools, widgets currently capturing output, stdout/stderr. Args: text: Text that has been printed. stderr: True if the print was to stderr, or False for stdout. 
""" if self._devtools_redirector is not None: current_frame = inspect.currentframe() self._devtools_redirector.write( text, current_frame.f_back if current_frame is not None else None ) # If we're in headless mode, we want printed text to still reach stdout/stderr. if self.is_headless: target_stream = self._original_stderr if stderr else self._original_stdout target_stream.write(text) # Send Print events to all widgets that are currently capturing output. for target, (_stdout, _stderr) in self._capture_print.items(): if (_stderr and stderr) or (_stdout and not stderr): target.post_message(events.Print(text, stderr=stderr)) def begin_capture_print( self, target: MessageTarget, stdout: bool = True, stderr: bool = True ) -> None: """Capture content that is printed (or written to stdout / stderr). If printing is captured, the `target` will be sent an [events.Print][textual.events.Print] message. Args: target: The widget where print content will be sent. stdout: Capture stdout. stderr: Capture stderr. """ if not stdout and not stderr: self.end_capture_print(target) else: self._capture_print[target] = (stdout, stderr) def end_capture_print(self, target: MessageTarget) -> None: """End capturing of prints. Args: target: The widget that was capturing prints. """ self._capture_print.pop(target) @asynccontextmanager async def run_test( self, *, headless: bool = True, size: tuple[int, int] | None = (80, 24), tooltips: bool = False, notifications: bool = False, message_hook: Callable[[Message], None] | None = None, ) -> AsyncGenerator[Pilot[ReturnType], None]: """An asynchronous context manager for testing apps. !!! tip See the guide for [testing](/guide/testing) Textual apps. Use this to run your app in "headless" mode (no output) and drive the app via a [Pilot][textual.pilot.Pilot] object. Example: ```python async with app.run_test() as pilot: await pilot.click("#Button.ok") assert ... ``` Args: headless: Run in headless mode (no output or input). 
size: Force terminal size to `(WIDTH, HEIGHT)`, or None to auto-detect. tooltips: Enable tooltips when testing. notifications: Enable notifications when testing. message_hook: An optional callback that will be called each time any message arrives at any message pump in the app. """ from textual.pilot import Pilot app = self app._disable_tooltips = not tooltips app._disable_notifications = not notifications app_ready_event = asyncio.Event() def on_app_ready() -> None: """Called when app is ready to process events.""" app_ready_event.set() async def run_app(app: App[ReturnType]) -> None: """Run the apps message loop. Args: app: App to run. """ with app._context(): try: if message_hook is not None: message_hook_context_var.set(message_hook) app._loop = asyncio.get_running_loop() app._thread_id = threading.get_ident() await app._process_messages( ready_callback=on_app_ready, headless=headless, terminal_size=size, ) finally: app_ready_event.set() # Launch the app in the "background" self._task = app_task = create_task(run_app(app), name=f"run_test {app}") # Wait until the app has performed all startup routines. await app_ready_event.wait() with app._context(): # Context manager returns pilot object to manipulate the app try: pilot = Pilot(app) await pilot._wait_for_screen() yield pilot finally: await asyncio.sleep(0) # Shutdown the app cleanly await app._shutdown() await app_task # Re-raise the exception which caused panic so test frameworks are aware if self._exception: raise self._exception async def run_async( self, *, headless: bool = False, inline: bool = False, inline_no_clear: bool = False, mouse: bool = True, size: tuple[int, int] | None = None, auto_pilot: AutopilotCallbackType | None = None, ) -> ReturnType | None: """Run the app asynchronously. Args: headless: Run in headless mode (no output). inline: Run the app inline (under the prompt). inline_no_clear: Don't clear the app output when exiting an inline app. mouse: Enable mouse support. 
size: Force terminal size to `(WIDTH, HEIGHT)`, or None to auto-detect. auto_pilot: An autopilot coroutine. Returns: App return value. """ from textual.pilot import Pilot app = self auto_pilot_task: Task | None = None if auto_pilot is None and constants.PRESS: keys = constants.PRESS.split(",") async def press_keys(pilot: Pilot[ReturnType]) -> None: """Auto press keys.""" await pilot.press(*keys) auto_pilot = press_keys async def app_ready() -> None: """Called by the message loop when the app is ready.""" nonlocal auto_pilot_task if auto_pilot is not None: async def run_auto_pilot( auto_pilot: AutopilotCallbackType, pilot: Pilot ) -> None: with self._context(): try: await auto_pilot(pilot) except Exception: app.exit() raise pilot = Pilot(app) auto_pilot_task = create_task( run_auto_pilot(auto_pilot, pilot), name=repr(pilot) ) self._thread_init() loop = app._loop = asyncio.get_running_loop() if hasattr(asyncio, "eager_task_factory"): loop.set_task_factory(asyncio.eager_task_factory) with app._context(): try: await app._process_messages( ready_callback=None if auto_pilot is None else app_ready, headless=headless, inline=inline, inline_no_clear=inline_no_clear, mouse=mouse, terminal_size=size, ) finally: try: if auto_pilot_task is not None: await auto_pilot_task finally: try: await asyncio.shield(app._shutdown()) except asyncio.CancelledError: pass app._loop = None app._thread_id = 0 return app.return_value def run( self, *, headless: bool = False, inline: bool = False, inline_no_clear: bool = False, mouse: bool = True, size: tuple[int, int] | None = None, auto_pilot: AutopilotCallbackType | None = None, loop: AbstractEventLoop | None = None, ) -> ReturnType | None: """Run the app. Args: headless: Run in headless mode (no output). inline: Run the app inline (under the prompt). inline_no_clear: Don't clear the app output when exiting an inline app. mouse: Enable mouse support. size: Force terminal size to `(WIDTH, HEIGHT)`, or None to auto-detect. 
auto_pilot: An auto pilot coroutine. loop: Asyncio loop instance, or `None` to use default. Returns: App return value. """ async def run_app() -> ReturnType | None: """Run the app.""" return await self.run_async( headless=headless, inline=inline, inline_no_clear=inline_no_clear, mouse=mouse, size=size, auto_pilot=auto_pilot, ) if loop is None: if _ASYNCIO_GET_EVENT_LOOP_IS_DEPRECATED: # N.B. This does work with Python<3.10, but global Locks, Events, etc # eagerly bind the event loop, and result in Future bound to wrong # loop errors. return asyncio.run(run_app()) try: global_loop = asyncio.get_event_loop() except RuntimeError: # the global event loop may have been destroyed by someone running # asyncio.run(), or asyncio.set_event_loop(None), in which case # we need to use asyncio.run() also. (We run this outside the # context of an exception handler) pass else: return global_loop.run_until_complete(run_app()) return asyncio.run(run_app()) return loop.run_until_complete(run_app()) async def _on_css_change(self) -> None: """Callback for the file monitor, called when CSS files change.""" css_paths = ( self.css_monitor._paths if self.css_monitor is not None else self.css_path ) if css_paths: try: time = perf_counter() stylesheet = self.stylesheet.copy() try: stylesheet.read_all(css_paths) except StylesheetError as error: # If one of the CSS paths is no longer available (or perhaps temporarily unavailable), # we'll end up with partial CSS, which is probably confusing more than anything. We opt to do # nothing here, knowing that we'll retry again very soon, on the next file monitor invocation. 
# Related issue: https://github.com/Textualize/textual/issues/3996 self.log.warning(str(error)) return stylesheet.parse() elapsed = (perf_counter() - time) * 1000 if self._css_has_errors: from rich.panel import Panel self.log.system( Panel( "CSS files successfully loaded after previous error:\n\n- " + "\n- ".join(str(path) for path in css_paths), style="green", border_style="green", ) ) self.log.system( f"<stylesheet> loaded {len(css_paths)} CSS files in {elapsed:.0f} ms" ) except Exception as error: # TODO: Catch specific exceptions self._css_has_errors = True self.log.error(error) self.bell() else: self._css_has_errors = False self.stylesheet = stylesheet self.stylesheet.update(self) for screen in self.screen_stack: self.stylesheet.update(screen) def render(self) -> RenderResult: """Render method, inherited from widget, to render the screen's background. May be overridden to customize background visuals. """ return Blank(self.styles.background) ExpectType = TypeVar("ExpectType", bound=Widget) if TYPE_CHECKING: @overload def get_child_by_id(self, id: str) -> Widget: ... @overload def get_child_by_id( self, id: str, expect_type: type[ExpectType] ) -> ExpectType: ... def get_child_by_id( self, id: str, expect_type: type[ExpectType] | None = None ) -> ExpectType | Widget: """Get the first child (immediate descendant) of this DOMNode with the given ID. Args: id: The ID of the node to search for. expect_type: Require the object be of the supplied type, or use `None` to apply no type restriction. Returns: The first child of this node with the specified ID. Raises: NoMatches: If no children could be found for this ID. WrongType: If the wrong type was found. """ return ( self.screen.get_child_by_id(id) if expect_type is None else self.screen.get_child_by_id(id, expect_type) ) if TYPE_CHECKING: @overload def get_widget_by_id(self, id: str) -> Widget: ... @overload def get_widget_by_id( self, id: str, expect_type: type[ExpectType] ) -> ExpectType: ... 
def get_widget_by_id( self, id: str, expect_type: type[ExpectType] | None = None ) -> ExpectType | Widget: """Get the first descendant widget with the given ID. Performs a breadth-first search rooted at the current screen. It will not return the Screen if that matches the ID. To get the screen, use `self.screen`. Args: id: The ID to search for in the subtree expect_type: Require the object be of the supplied type, or None for any type. Defaults to None. Returns: The first descendant encountered with this ID. Raises: NoMatches: if no children could be found for this ID WrongType: if the wrong type was found. """ return ( self.screen.get_widget_by_id(id) if expect_type is None else self.screen.get_widget_by_id(id, expect_type) ) def get_child_by_type(self, expect_type: type[ExpectType]) -> ExpectType: """Get a child of a give type. Args: expect_type: The type of the expected child. Raises: NoMatches: If no valid child is found. Returns: A widget. """ return self.screen.get_child_by_type(expect_type) def update_styles(self, node: DOMNode) -> None: """Immediately update the styles of this node and all descendant nodes. Should be called whenever CSS classes / pseudo classes change. For example, when you hover over a button, the :hover pseudo class will be added, and this method is called to apply the corresponding :hover styles. """ descendants = node.walk_children(with_self=True) self.stylesheet.update_nodes(descendants, animate=True) def mount( self, *widgets: Widget, before: int | str | Widget | None = None, after: int | str | Widget | None = None, ) -> AwaitMount: """Mount the given widgets relative to the app's screen. Args: *widgets: The widget(s) to mount. before: Optional location to mount before. An `int` is the index of the child to mount before, a `str` is a `query_one` query to find the widget to mount before. after: Optional location to mount after. 
An `int` is the index of the child to mount after, a `str` is a `query_one` query to find the widget to mount after. Returns: An awaitable object that waits for widgets to be mounted. Raises: MountError: If there is a problem with the mount request. Note: Only one of `before` or `after` can be provided. If both are provided a `MountError` will be raised. """ return self.screen.mount(*widgets, before=before, after=after) def mount_all( self, widgets: Iterable[Widget], *, before: int | str | Widget | None = None, after: int | str | Widget | None = None, ) -> AwaitMount: """Mount widgets from an iterable. Args: widgets: An iterable of widgets. before: Optional location to mount before. An `int` is the index of the child to mount before, a `str` is a `query_one` query to find the widget to mount before. after: Optional location to mount after. An `int` is the index of the child to mount after, a `str` is a `query_one` query to find the widget to mount after. Returns: An awaitable object that waits for widgets to be mounted. Raises: MountError: If there is a problem with the mount request. Note: Only one of `before` or `after` can be provided. If both are provided a `MountError` will be raised. """ return self.mount(*widgets, before=before, after=after) def _init_mode(self, mode: str) -> AwaitMount: """Do internal initialization of a new screen stack mode. Args: mode: Name of the mode. Returns: An optionally awaitable object which can be awaited until the screen associated with the mode has been mounted. 
""" stack = self._screen_stacks.get(mode, []) if stack: # Mode already exists # Return an dummy await return AwaitMount(stack[0], []) if mode in self._modes: # Mode is defined in MODES _screen = self._modes[mode] if isinstance(_screen, Screen): raise TypeError( "MODES cannot contain instances, use a type instead " f"(got instance of {type(_screen).__name__} for {mode!r})" ) new_screen: Screen | str = _screen() if callable(_screen) else _screen screen, await_mount = self._get_screen(new_screen) stack.append(screen) self._load_screen_css(screen) if screen._css_update_count != self._css_update_count: self.refresh_css() screen.post_message(events.ScreenResume()) else: # Mode is not defined screen = self.get_default_screen() stack.append(screen) self._register(self, screen) screen.post_message(events.ScreenResume()) await_mount = AwaitMount(stack[0], []) screen._screen_resized(self.size) self._screen_stacks[mode] = stack return await_mount def switch_mode(self, mode: str) -> AwaitMount: """Switch to a given mode. Args: mode: The mode to switch to. Returns: An optionally awaitable object which waits for the screen associated with the mode to be mounted. Raises: UnknownModeError: If trying to switch to an unknown mode. 
""" if mode == self._current_mode: return AwaitMount(self.screen, []) if mode not in self._modes: raise UnknownModeError(f"No known mode {mode!r}") self.screen.post_message(events.ScreenSuspend()) self.screen.refresh() if mode not in self._screen_stacks: await_mount = self._init_mode(mode) else: await_mount = AwaitMount(self.screen, []) self._current_mode = mode if self.screen._css_update_count != self._css_update_count: self.refresh_css() self.screen._screen_resized(self.size) self.screen.post_message(events.ScreenResume()) self.log.system(f"{self._current_mode!r} is the current mode") self.log.system(f"{self.screen} is active") return await_mount def add_mode(self, mode: str, base_screen: str | Callable[[], Screen]) -> None: """Adds a mode and its corresponding base screen to the app. Args: mode: The new mode. base_screen: The base screen associated with the given mode. Raises: InvalidModeError: If the name of the mode is not valid/duplicated. """ if mode == "_default": raise InvalidModeError("Cannot use '_default' as a custom mode.") elif mode in self._modes: raise InvalidModeError(f"Duplicated mode name {mode!r}.") if isinstance(base_screen, Screen): raise TypeError( "add_mode() must be called with a Screen type, not an instance" f" (got instance of {type(base_screen).__name__})" ) self._modes[mode] = base_screen def remove_mode(self, mode: str) -> AwaitComplete: """Removes a mode from the app. Screens that are running in the stack of that mode are scheduled for pruning. Args: mode: The mode to remove. It can't be the active mode. Raises: ActiveModeError: If trying to remove the active mode. UnknownModeError: If trying to remove an unknown mode. 
""" if mode == self._current_mode: raise ActiveModeError(f"Can't remove active mode {mode!r}") elif mode not in self._modes: raise UnknownModeError(f"Unknown mode {mode!r}") else: del self._modes[mode] if mode not in self._screen_stacks: return AwaitComplete.nothing() stack = self._screen_stacks[mode] del self._screen_stacks[mode] async def remove_screens() -> None: """Remove screens.""" for screen in reversed(stack): await self._replace_screen(screen) return AwaitComplete(remove_screens()).call_next(self) def is_screen_installed(self, screen: Screen | str) -> bool: """Check if a given screen has been installed. Args: screen: Either a Screen object or screen name (the `name` argument when installed). Returns: True if the screen is currently installed, """ if isinstance(screen, str): return screen in self._installed_screens else: return screen in self._installed_screens.values() @overload def get_screen(self, screen: ScreenType) -> ScreenType: ... @overload def get_screen(self, screen: str) -> Screen: ... @overload def get_screen( self, screen: str, screen_class: Type[ScreenType] | None = None ) -> ScreenType: ... @overload def get_screen( self, screen: ScreenType, screen_class: Type[ScreenType] | None = None ) -> ScreenType: ... def get_screen( self, screen: Screen | str, screen_class: Type[Screen] | None = None ) -> Screen: """Get an installed screen. Example: ```python my_screen = self.get_screen("settings", MyScreen) ``` Args: screen: Either a Screen object or screen name (the `name` argument when installed). screen_class: Class of expected screen, or `None` for any screen class. Raises: KeyError: If the named screen doesn't exist. Returns: A screen instance. 
""" if isinstance(screen, str): try: next_screen = self._installed_screens[screen] except KeyError: raise KeyError(f"No screen called {screen!r} installed") from None if callable(next_screen): next_screen = next_screen() self._installed_screens[screen] = next_screen else: next_screen = screen if screen_class is not None and not isinstance(next_screen, screen_class): raise TypeError( f"Expected a screen of type {screen_class}, got {type(next_screen)}" ) return next_screen def _get_screen(self, screen: Screen | str) -> tuple[Screen, AwaitMount]: """Get an installed screen and an AwaitMount object. If the screen isn't running, it will be registered before it is run. Args: screen: Either a Screen object or screen name (the `name` argument when installed). Raises: KeyError: If the named screen doesn't exist. Returns: A screen instance and an awaitable that awaits the children mounting. """ _screen = self.get_screen(screen) if not _screen.is_running: widgets = self._register(self, _screen) await_mount = AwaitMount(_screen, widgets) self.call_next(await_mount) return (_screen, await_mount) else: await_mount = AwaitMount(_screen, []) self.call_next(await_mount) return (_screen, await_mount) def _load_screen_css(self, screen: Screen): """Loads the CSS associated with a screen.""" if self.css_monitor is not None: self.css_monitor.add_paths(screen.css_path) update = False for path in screen.css_path: if not self.stylesheet.has_source(str(path), ""): self.stylesheet.read(path) update = True if screen.CSS: try: screen_path = inspect.getfile(screen.__class__) except (TypeError, OSError): screen_path = "" screen_class_var = f"{screen.__class__.__name__}.CSS" read_from = (screen_path, screen_class_var) if not self.stylesheet.has_source(screen_path, screen_class_var): self.stylesheet.add_source( screen.CSS, read_from=read_from, is_default_css=False, scope=screen._css_type_name if screen.SCOPED_CSS else "", ) update = True if update: self.stylesheet.reparse() 
self.stylesheet.update(self) async def _replace_screen(self, screen: Screen) -> Screen: """Handle the replaced screen. Args: screen: A screen object. Returns: The screen that was replaced. """ if self._screen_stack: self.screen.refresh() screen.post_message(events.ScreenSuspend()) self.log.system(f"{screen} SUSPENDED") if not self.is_screen_installed(screen) and all( screen not in stack for stack in self._screen_stacks.values() ): self.capture_mouse(None) await screen.remove() self.log.system(f"{screen} REMOVED") return screen if TYPE_CHECKING: @overload def push_screen( self, screen: Screen[ScreenResultType] | str, callback: ScreenResultCallbackType[ScreenResultType] | None = None, wait_for_dismiss: Literal[False] = False, ) -> AwaitMount: ... @overload def push_screen( self, screen: Screen[ScreenResultType] | str, callback: ScreenResultCallbackType[ScreenResultType] | None = None, wait_for_dismiss: Literal[True] = True, ) -> asyncio.Future[ScreenResultType]: ... def push_screen( self, screen: Screen[ScreenResultType] | str, callback: ScreenResultCallbackType[ScreenResultType] | None = None, wait_for_dismiss: bool = False, ) -> AwaitMount | asyncio.Future[ScreenResultType]: """Push a new [screen](/guide/screens) on the screen stack, making it the current screen. Args: screen: A Screen instance or the name of an installed screen. callback: An optional callback function that will be called if the screen is [dismissed][textual.screen.Screen.dismiss] with a result. wait_for_dismiss: If `True`, awaiting this method will return the dismiss value from the screen. When set to `False`, awaiting this method will wait for the screen to be mounted. Note that `wait_for_dismiss` should only be set to `True` when running in a worker. Raises: NoActiveWorker: If using `wait_for_dismiss` outside of a worker. Returns: An optional awaitable that awaits the mounting of the screen and its children, or an asyncio Future to await the result of the screen. 
""" if not isinstance(screen, (Screen, str)): raise TypeError( f"push_screen requires a Screen instance or str; not {screen!r}" ) try: loop = asyncio.get_running_loop() except RuntimeError: # Mainly for testing, when push_screen isn't called in an async context future: asyncio.Future[ScreenResultType] = asyncio.Future() else: future = loop.create_future() self.app.capture_mouse(None) if self._screen_stack: self.screen.post_message(events.ScreenSuspend()) self.screen.refresh() next_screen, await_mount = self._get_screen(screen) try: message_pump = active_message_pump.get() except LookupError: message_pump = self.app next_screen._push_result_callback(message_pump, callback, future) self._load_screen_css(next_screen) next_screen._update_auto_focus() self._screen_stack.append(next_screen) next_screen.post_message(events.ScreenResume()) self.log.system(f"{self.screen} is current (PUSHED)") if wait_for_dismiss: try: get_current_worker() except NoActiveWorker: raise NoActiveWorker( "push_screen must be run from a worker when `wait_for_dismiss` is True" ) from None return future else: return await_mount if TYPE_CHECKING: @overload async def push_screen_wait( self, screen: Screen[ScreenResultType] ) -> ScreenResultType: ... @overload async def push_screen_wait(self, screen: str) -> Any: ... async def push_screen_wait( self, screen: Screen[ScreenResultType] | str ) -> ScreenResultType | Any: """Push a screen and wait for the result (received from [`Screen.dismiss`][textual.screen.Screen.dismiss]). Note that this method may only be called when running in a worker. Args: screen: A screen or the name of an installed screen. Returns: The screen's result. 
""" await self._flush_next_callbacks() # The shield prevents the cancellation of the current task from canceling the push_screen awaitable return await asyncio.shield(self.push_screen(screen, wait_for_dismiss=True)) def switch_screen(self, screen: Screen | str) -> AwaitComplete: """Switch to another [screen](/guide/screens) by replacing the top of the screen stack with a new screen. Args: screen: Either a Screen object or screen name (the `name` argument when installed). """ if not isinstance(screen, (Screen, str)): raise TypeError( f"switch_screen requires a Screen instance or str; not {screen!r}" ) next_screen, await_mount = self._get_screen(screen) if screen is self.screen or next_screen is self.screen: self.log.system(f"Screen {screen} is already current.") return AwaitComplete.nothing() self.app.capture_mouse(None) top_screen = self._screen_stack.pop() top_screen._pop_result_callback() self._load_screen_css(next_screen) self._screen_stack.append(next_screen) self.screen.post_message(events.ScreenResume()) self.screen._push_result_callback(self.screen, None) self.log.system(f"{self.screen} is current (SWITCHED)") async def do_switch() -> None: """Task to perform switch.""" await await_mount() await self._replace_screen(top_screen) return AwaitComplete(do_switch()).call_next(self) def install_screen(self, screen: Screen, name: str) -> None: """Install a screen. Installing a screen prevents Textual from destroying it when it is no longer on the screen stack. Note that you don't need to install a screen to use it. See [push_screen][textual.app.App.push_screen] or [switch_screen][textual.app.App.switch_screen] to make a new screen current. Args: screen: Screen to install. name: Unique name to identify the screen. Raises: ScreenError: If the screen can't be installed. Returns: An awaitable that awaits the mounting of the screen and its children. 
""" if name in self._installed_screens: raise ScreenError(f"Can't install screen; {name!r} is already installed") if screen in self._installed_screens.values(): raise ScreenError( f"Can't install screen; {screen!r} has already been installed" ) self._installed_screens[name] = screen self.log.system(f"{screen} INSTALLED name={name!r}") def uninstall_screen(self, screen: Screen | str) -> str | None: """Uninstall a screen. If the screen was not previously installed, then this method is a null-op. Uninstalling a screen allows Textual to delete it when it is popped or switched. Note that uninstalling a screen is only required if you have previously installed it with [install_screen][textual.app.App.install_screen]. Textual will also uninstall screens automatically on exit. Args: screen: The screen to uninstall or the name of an installed screen. Returns: The name of the screen that was uninstalled, or None if no screen was uninstalled. """ if isinstance(screen, str): if screen not in self._installed_screens: return None uninstall_screen = self._installed_screens[screen] if any(uninstall_screen in stack for stack in self._screen_stacks.values()): raise ScreenStackError("Can't uninstall screen in screen stack") del self._installed_screens[screen] self.log.system(f"{uninstall_screen} UNINSTALLED name={screen!r}") return screen else: if any(screen in stack for stack in self._screen_stacks.values()): raise ScreenStackError("Can't uninstall screen in screen stack") for name, installed_screen in self._installed_screens.items(): if installed_screen is screen: self._installed_screens.pop(name) self.log.system(f"{screen} UNINSTALLED name={name!r}") return name return None def pop_screen(self) -> AwaitComplete: """Pop the current [screen](/guide/screens) from the stack, and switch to the previous screen. Returns: The screen that was replaced. 
""" screen_stack = self._screen_stack if len(screen_stack) <= 1: raise ScreenStackError( "Can't pop screen; there must be at least one screen on the stack" ) previous_screen = screen_stack.pop() previous_screen._pop_result_callback() self.screen.post_message(events.ScreenResume()) self.log.system(f"{self.screen} is active") async def do_pop() -> None: """Task to pop the screen.""" await self._replace_screen(previous_screen) return AwaitComplete(do_pop()).call_next(self) def _pop_to_screen(self, screen: Screen) -> None: """Pop screens until the given screen is active. Args: screen: desired active screen Raises: ScreenError: If the screen doesn't exist in the stack. """ screens_to_pop: list[Screen] = [] for pop_screen in reversed(self.screen_stack): if pop_screen is not screen: screens_to_pop.append(pop_screen) else: break else: raise ScreenError(f"Screen {screen!r} not in screen stack") async def pop_screens() -> None: """Pop any screens in `screens_to_pop`.""" with self.batch_update(): for screen in screens_to_pop: await screen.dismiss() if screens_to_pop: self.call_later(pop_screens) def set_focus(self, widget: Widget | None, scroll_visible: bool = True) -> None: """Focus (or unfocus) a widget. A focused widget will receive key events first. Args: widget: Widget to focus. scroll_visible: Scroll widget into view. """ self.screen.set_focus(widget, scroll_visible) def _set_mouse_over( self, widget: Widget | None, hover_widget: Widget | None ) -> None: """Called when the mouse is over another widget. Args: widget: Widget under mouse, or None for no widgets. 
""" if widget is None: if self.mouse_over is not None: try: self.mouse_over.post_message(events.Leave(self.mouse_over)) finally: self.mouse_over = None else: if self.mouse_over is not widget: try: if self.mouse_over is not None: self.mouse_over.post_message(events.Leave(self.mouse_over)) if widget is not None: widget.post_message(events.Enter(widget)) finally: self.mouse_over = widget current_hover_over = self.hover_over if current_hover_over is not None: current_hover_over.mouse_hover = False if hover_widget is not None: hover_widget.mouse_hover = True if hover_widget._has_hover_style: hover_widget._update_styles() if current_hover_over is not None and current_hover_over._has_hover_style: current_hover_over._update_styles() self.hover_over = hover_widget def _update_mouse_over(self, screen: Screen) -> None: """Updates the mouse over after the next refresh. This method is called whenever a widget is added or removed, which may change the widget under the mouse. """ if self.mouse_over is None or not screen.is_active: return async def check_mouse() -> None: """Check if the mouse over widget has changed.""" try: hover_widgets = screen.get_hover_widgets_at(*self.mouse_position) except NoWidget: pass else: mouse_over, hover_over = hover_widgets.widgets if ( mouse_over is not self.mouse_over or hover_over is not self.hover_over ): self._set_mouse_over(mouse_over, hover_over) self.call_after_refresh(check_mouse) def capture_mouse(self, widget: Widget | None) -> None: """Send all mouse events to the given widget or disable mouse capture. Normally mouse events are sent to the widget directly under the pointer. Capturing the mouse allows a widget to receive mouse events even when the pointer is over another widget. Args: widget: Widget to capture mouse events, or `None` to end mouse capture. 
""" if widget == self.mouse_captured: return if self.mouse_captured is not None: self.mouse_captured.post_message(events.MouseRelease(self.mouse_position)) self.mouse_captured = widget if widget is not None: widget.post_message(events.MouseCapture(self.mouse_position)) def panic(self, *renderables: RenderableType) -> None: """Exits the app and display error message(s). Used in response to unexpected errors. For a more graceful exit, see the [exit][textual.app.App.exit] method. Args: *renderables: Text or Rich renderable(s) to display on exit. """ assert all( is_renderable(renderable) for renderable in renderables ), "Can only call panic with strings or Rich renderables" def render(renderable: RenderableType) -> list[Segment]: """Render a panic renderables.""" segments = list(self.console.render(renderable, self.console.options)) return segments pre_rendered = [Segments(render(renderable)) for renderable in renderables] self._exit_renderables.extend(pre_rendered) self._close_messages_no_wait() def _handle_exception(self, error: Exception) -> None: """Called with an unhandled exception. Always results in the app exiting. Args: error: An exception instance. """ self._return_code = 1 # If we're running via pilot and this is the first exception encountered, # take note of it so that we can re-raise for test frameworks later. 
        if self._exception is None:
            self._exception = error
            self._exception_event.set()
        if hasattr(error, "__rich__"):
            # Exception has a rich method, so we can defer to that for the rendering
            self.panic(error)
        else:
            # Use default exception rendering
            self._fatal_error()

    def _fatal_error(self) -> None:
        """Exits the app after an unhandled exception."""
        # Imported locally to avoid the cost unless a fatal error actually occurs.
        from rich.traceback import Traceback

        self.bell()
        traceback = Traceback(
            show_locals=True, width=None, locals_max_length=5, suppress=[rich]
        )
        self._exit_renderables.append(
            Segments(self.console.render(traceback, self.console.options))
        )
        self._close_messages_no_wait()

    def _print_error_renderables(self) -> None:
        """Print and clear exit renderables."""
        error_count = len(self._exit_renderables)
        if "debug" in self.features:
            # In debug mode, show every stored error renderable.
            for renderable in self._exit_renderables:
                self.error_console.print(renderable)
            if error_count > 1:
                self.error_console.print(
                    f"\n[b]NOTE:[/b] {error_count} errors shown above.", markup=True
                )
        elif self._exit_renderables:
            # Otherwise only the first error is shown, with a hint for the rest.
            self.error_console.print(self._exit_renderables[0])
            if error_count > 1:
                self.error_console.print(
                    f"\n[b]NOTE:[/b] 1 of {error_count} errors shown. Run with [b]textual run --dev[/] to see all errors.",
                    markup=True,
                )

        self._exit_renderables.clear()

    def _build_driver(
        self, headless: bool, inline: bool, mouse: bool, size: tuple[int, int] | None
    ) -> Driver:
        """Construct a driver instance.

        Args:
            headless: Request headless driver.
            inline: Request inline driver.
            mouse: Request mouse support.
            size: Initial size.

        Returns:
            Driver instance.
""" driver: Driver driver_class: type[Driver] if headless: from textual.drivers.headless_driver import HeadlessDriver driver_class = HeadlessDriver elif inline and not WINDOWS: from textual.drivers.linux_inline_driver import LinuxInlineDriver driver_class = LinuxInlineDriver else: driver_class = self.driver_class driver = self._driver = driver_class( self, debug=constants.DEBUG, mouse=mouse, size=size, ) return driver async def _init_devtools(self): """Initialize developer tools.""" if self.devtools is not None: from textual_dev.client import DevtoolsConnectionError try: await self.devtools.connect() self.log.system(f"Connected to devtools ( {self.devtools.url} )") except DevtoolsConnectionError: self.log.system(f"Couldn't connect to devtools ( {self.devtools.url} )") async def _process_messages( self, ready_callback: CallbackType | None = None, headless: bool = False, inline: bool = False, inline_no_clear: bool = False, mouse: bool = True, terminal_size: tuple[int, int] | None = None, message_hook: Callable[[Message], None] | None = None, ) -> None: self._thread_init() async def app_prelude() -> bool: """Work required before running the app. Returns: `True` if the app should continue, or `False` if there was a problem starting. 
""" await self._init_devtools() self.log.system("---") self.log.system(loop=asyncio.get_running_loop()) self.log.system(features=self.features) if constants.LOG_FILE is not None: _log_path = os.path.abspath(constants.LOG_FILE) self.log.system(f"Writing logs to {_log_path!r}") try: if self.css_path: self.stylesheet.read_all(self.css_path) for read_from, css, tie_breaker, scope in self._get_default_css(): self.stylesheet.add_source( css, read_from=read_from, is_default_css=True, tie_breaker=tie_breaker, scope=scope, ) if self.CSS: try: app_path = inspect.getfile(self.__class__) except (TypeError, OSError): app_path = "" read_from = (app_path, f"{self.__class__.__name__}.CSS") self.stylesheet.add_source( self.CSS, read_from=read_from, is_default_css=False ) except Exception as error: self._handle_exception(error) self._print_error_renderables() return False if self.css_monitor: self.set_interval(0.25, self.css_monitor, name="css monitor") self.log.system("STARTED", self.css_monitor) return True async def run_process_messages(): """The main message loop, invoke below.""" async def invoke_ready_callback() -> None: if ready_callback is not None: ready_result = ready_callback() if inspect.isawaitable(ready_result): await ready_result with self.batch_update(): try: try: await self._dispatch_message(events.Compose()) await self._dispatch_message( events.Resize.from_dimensions(self.size, None) ) default_screen = self.screen self.stylesheet.apply(self) await self._dispatch_message(events.Mount()) self.check_idle() finally: self._mounted_event.set() self._is_mounted = True Reactive._initialize_object(self) if self.screen is not default_screen: self.stylesheet.apply(default_screen) await self.animator.start() except Exception: await self.animator.stop() raise finally: self._running = True await self._ready() await invoke_ready_callback() try: await self._process_messages_loop() except asyncio.CancelledError: pass finally: self.workers.cancel_all() self._running = False try: 
                    await self.animator.stop()
                finally:
                    # Stop timers even if the animator failed to stop cleanly.
                    await Timer._stop_all(self._timers)

        with self._context():
            if not await app_prelude():
                return
            self._running = True
            try:
                load_event = events.Load()
                await self._dispatch_message(load_event)

                driver = self._driver = self._build_driver(
                    headless=headless,
                    inline=inline,
                    mouse=mouse,
                    size=terminal_size,
                )
                self.log(driver=driver)

                if not self._exit:
                    driver.start_application_mode()
                    try:
                        # Redirect stdout/stderr so prints don't corrupt the TUI.
                        with redirect_stdout(self._capture_stdout):
                            with redirect_stderr(self._capture_stderr):
                                await run_process_messages()

                    finally:
                        Reactive._clear_watchers(self)
                        if self._driver.is_inline:
                            # Restore the terminal cursor for inline mode.
                            cursor_x, cursor_y = self._previous_cursor_position
                            self._driver.write(
                                Control.move(-cursor_x, -cursor_y).segment.text
                            )
                            self._driver.flush()
                            if inline_no_clear and not self.app._exit_renderables:
                                # Leave a static copy of the final frame in the terminal.
                                console = Console()
                                try:
                                    console.print(self.screen._compositor)
                                except ScreenStackError:
                                    console.print()
                            else:
                                self._driver.write(
                                    Control.move(0, -self.INLINE_PADDING).segment.text
                                )
                        driver.stop_application_mode()
            except Exception as error:
                self._handle_exception(error)

    async def _pre_process(self) -> bool:
        """Special case for the app, which doesn't need the functionality in MessagePump."""
        return True

    async def _ready(self) -> None:
        """Called immediately prior to processing messages.

        May be used as a hook for any operations that should run first.
""" ready_time = (perf_counter() - self._start_time) * 1000 self.log.system(f"ready in {ready_time:0.0f} milliseconds") async def take_screenshot() -> None: """Take a screenshot and exit.""" self.save_screenshot( path=constants.SCREENSHOT_LOCATION, filename=constants.SCREENSHOT_FILENAME, ) self.exit() if constants.SCREENSHOT_DELAY >= 0: self.set_timer( constants.SCREENSHOT_DELAY, take_screenshot, name="screenshot timer" ) async def _on_compose(self) -> None: _rich_traceback_omit = True self._compose_screen = self.screen try: widgets = [*self.screen._nodes, *compose(self)] except TypeError as error: raise TypeError( f"{self!r} compose() method returned an invalid result; {error}" ) from error await self.mount_all(widgets) async def _check_recompose(self) -> None: """Check if a recompose is required.""" if self._recompose_required: self._recompose_required = False await self.recompose() async def recompose(self) -> None: """Recompose the widget. Recomposing will remove children and call `self.compose` again to remount. """ if self._exit: return try: async with self.screen.batch(): await self.screen.query("*").exclude(".-textual-system").remove() await self.screen.mount_all(compose(self)) except ScreenStackError: pass def _register_child( self, parent: DOMNode, child: Widget, before: int | None, after: int | None ) -> None: """Register a widget as a child of another. Args: parent: Parent node. child: The child widget to register. before: A location to mount before. after: A location to mount after. """ # Let's be 100% sure that we've not been asked to do a before and an # after at the same time. It's possible that we can remove this # check later on, but for the purposes of development right now, # it's likely a good idea to keep it here to check assumptions in # the rest of the code. if before is not None and after is not None: raise AppError("Only one of 'before' and 'after' may be specified.") # If we don't already know about this widget... 
        if child not in self._registry:
            # Now to figure out where to place it. If we've got a `before`...
            if before is not None:
                # ...it's safe to NodeList._insert before that location.
                parent._nodes._insert(before, child)
            elif after is not None and after != -1:
                # In this case we've got an after. -1 holds the special
                # position (for now) of meaning "okay really what I mean is
                # do an append, like if I'd asked to add with no before or
                # after". So... we insert before the next item in the node
                # list, if after isn't -1.
                parent._nodes._insert(after + 1, child)
            else:
                # At this point we appear to not be adding before or after,
                # or we've got a before/after value that really means
                # "please append". So...
                parent._nodes._append(child)

            # Now that the widget is in the NodeList of its parent, sort out
            # the rest of the admin.
            self._registry.add(child)
            child._attach(parent)
            child._post_register(self)

    def _register(
        self,
        parent: DOMNode,
        *widgets: Widget,
        before: int | None = None,
        after: int | None = None,
        cache: dict[tuple, RulesMap] | None = None,
    ) -> list[Widget]:
        """Register widget(s) so they may receive events.

        Args:
            parent: Parent node.
            *widgets: The widget(s) to register.
            before: A location to mount before.
            after: A location to mount after.
            cache: Optional rules map cache, shared across recursive calls.

        Returns:
            List of modified widgets.
        """
        if not widgets:
            return []

        if cache is None:
            cache = {}
        widget_list: Iterable[Widget]
        if before is not None or after is not None:
            # There's a before or after, which means there's going to be an
            # insertion, so make it easier to get the new things in the
            # correct order.
            widget_list = reversed(widgets)
        else:
            widget_list = widgets

        apply_stylesheet = self.stylesheet.apply
        new_widgets: list[Widget] = []
        add_new_widget = new_widgets.append
        for widget in widget_list:
            # Reset lifecycle flags in case the widget is being re-registered.
            widget._closing = False
            widget._closed = False
            widget._pruning = False
            if not isinstance(widget, Widget):
                raise AppError(f"Can't register {widget!r}; expected a Widget instance")
            if widget not in self._registry:
                add_new_widget(widget)
                self._register_child(parent, widget, before, after)
                if widget._nodes:
                    # Recursively register any pre-existing children.
                    self._register(widget, *widget._nodes, cache=cache)

        for widget in new_widgets:
            apply_stylesheet(widget, cache=cache)
            widget._start_messages()

        if not self._running:
            # If the app is not running, prevent awaiting of the widget tasks
            return []

        return list(widgets)

    def _unregister(self, widget: Widget) -> None:
        """Unregister a widget.

        Args:
            widget: A Widget to unregister
        """
        widget.blur()
        if isinstance(widget._parent, Widget):
            widget._parent._nodes._remove(widget)
            widget._detach()
        self._registry.discard(widget)

    async def _disconnect_devtools(self):
        """Disconnect from the devtools server, if connected."""
        if self.devtools is not None:
            await self.devtools.disconnect()

    def _start_widget(self, parent: Widget, widget: Widget) -> None:
        """Start a widget (run its task) so that it can receive messages.

        Args:
            parent: The parent of the Widget.
            widget: The Widget to start.
        """
        widget._attach(parent)
        widget._start_messages()
        self.app._registry.add(widget)

    def is_mounted(self, widget: Widget) -> bool:
        """Check if a widget is mounted.

        Args:
            widget: A widget.

        Returns:
            True if the widget is mounted.
""" return widget in self._registry async def _close_all(self) -> None: """Close all message pumps.""" # Close all screens on all stacks: for stack in self._screen_stacks.values(): for stack_screen in reversed(stack): if stack_screen._running: await self._prune(stack_screen) stack.clear() self._installed_screens.clear() self._modes.clear() # Close any remaining nodes # Should be empty by now remaining_nodes = list(self._registry) for child in remaining_nodes: await child._close_messages() async def _shutdown(self) -> None: self._begin_batch() # Prevents any layout / repaint while shutting down driver = self._driver self._running = False if driver is not None: driver.disable_input() await self._close_all() await self._close_messages() await self._dispatch_message(events.Unmount()) if self._driver is not None: self._driver.close() self._nodes._clear() if self.devtools is not None and self.devtools.is_connected: await self._disconnect_devtools() self._print_error_renderables() if constants.SHOW_RETURN: from rich.console import Console from rich.pretty import Pretty console = Console() console.print("[b]The app returned:") console.print(Pretty(self._return_value)) async def _on_exit_app(self) -> None: self._begin_batch() # Prevent repaint / layout while shutting down self._message_queue.put_nowait(None) def refresh( self, *, repaint: bool = True, layout: bool = False, recompose: bool = False, ) -> Self: """Refresh the entire screen. Args: repaint: Repaint the widget (will call render() again). layout: Also layout widgets in the view. recompose: Re-compose the widget (will remove and re-mount children). Returns: The `App` instance. """ if recompose: self._recompose_required = recompose self.call_next(self._check_recompose) return self if self._screen_stack: self.screen.refresh(repaint=repaint, layout=layout) self.check_idle() return self def refresh_css(self, animate: bool = True) -> None: """Refresh CSS. Args: animate: Also execute CSS animations. 
""" stylesheet = self.app.stylesheet stylesheet.set_variables(self.get_css_variables()) stylesheet.reparse() stylesheet.update(self.app, animate=animate) try: if self.screen.is_mounted: self.screen._refresh_layout(self.size) self.screen._css_update_count = self._css_update_count except ScreenError: pass # The other screens in the stack will need to know about some style # changes, as a final pass let's check in on every screen that isn't # the current one and update them too. for screen in self.screen_stack: if screen != self.screen: stylesheet.update(screen, animate=animate) screen._css_update_count = self._css_update_count def _display(self, screen: Screen, renderable: RenderableType | None) -> None: """Display a renderable within a sync. Args: screen: Screen instance renderable: A Rich renderable. """ try: if renderable is None: return if self._batch_count: return if ( self._running and not self._closed and not self.is_headless and self._driver is not None ): console = self.console self._begin_update() try: try: if isinstance(renderable, CompositorUpdate): cursor_position = self.screen.outer_size.clamp_offset( self.cursor_position ) if self._driver.is_inline: terminal_sequence = Control.move( *(-self._previous_cursor_position) ).segment.text terminal_sequence += renderable.render_segments(console) terminal_sequence += Control.move( *cursor_position ).segment.text else: terminal_sequence = renderable.render_segments(console) terminal_sequence += Control.move_to( *cursor_position ).segment.text self._previous_cursor_position = cursor_position else: segments = console.render(renderable) terminal_sequence = console._render_buffer(segments) except Exception as error: self._handle_exception(error) else: if WINDOWS: # Combat a problem with Python on Windows. 
# # https://github.com/Textualize/textual/issues/2548 # https://github.com/python/cpython/issues/82052 CHUNK_SIZE = 8192 write = self._driver.write for chunk in ( terminal_sequence[offset : offset + CHUNK_SIZE] for offset in range( 0, len(terminal_sequence), CHUNK_SIZE ) ): write(chunk) else: self._driver.write(terminal_sequence) finally: self._end_update() self._driver.flush() finally: self.post_display_hook() def post_display_hook(self) -> None: """Called immediately after a display is done. Used in tests.""" def get_widget_at(self, x: int, y: int) -> tuple[Widget, Region]: """Get the widget under the given coordinates. Args: x: X coordinate. y: Y coordinate. Returns: The widget and the widget's screen region. """ return self.screen.get_widget_at(x, y) def bell(self) -> None: """Play the console 'bell'. For terminals that support a bell, this typically makes a notification or error sound. Some terminals may make no sound or display a visual bell indicator, depending on configuration. """ if not self.is_headless and self._driver is not None: self._driver.write("\07") @property def _binding_chain(self) -> list[tuple[DOMNode, BindingsMap]]: """Get a chain of nodes and bindings to consider. If no widget is focused, returns the bindings from both the screen and the app level bindings. Otherwise, combines all the bindings from the currently focused node up the DOM to the root App. """ focused = self.focused namespace_bindings: list[tuple[DOMNode, BindingsMap]] if focused is None: namespace_bindings = [ (self.screen, self.screen._bindings), (self, self._bindings), ] else: namespace_bindings = [ (node, node._bindings) for node in focused.ancestors_with_self ] return namespace_bindings def simulate_key(self, key: str) -> None: """Simulate a key press. This will perform the same action as if the user had pressed the key. Args: key: Key to simulate. May also be the name of a key, e.g. "space". 
""" self.post_message(events.Key(key, None)) async def _check_bindings(self, key: str, priority: bool = False) -> bool: """Handle a key press. This method is used internally by the bindings system. Args: key: A key. priority: If `True` check from `App` down, otherwise from focused up. Returns: True if the key was handled by a binding, otherwise False """ for namespace, bindings in ( reversed(self.screen._binding_chain) if priority else self.screen._modal_binding_chain ): key_bindings = bindings.key_to_bindings.get(key, ()) for binding in key_bindings: if binding.priority == priority: if await self.run_action(binding.action, namespace): return True return False def action_help_quit(self) -> None: """Bound to ctrl+C to alert the user that it no longer quits.""" # Doing this because users will reflexively hit ctrl+C to exit # Ctrl+C is now bound to copy if an input / textarea is focused. # This makes is possible, even likely, that a user may do it accidentally -- which would be maddening. # Rather than do nothing, we can make an educated guess the user was trying # to quit, and inform them how you really quit. for key, active_binding in self.active_bindings.items(): if active_binding.binding.action in ("quit", "app.quit"): self.notify( f"Press [b]{key}[/b] to quit the app", title="Do you want to quit?" ) return @classmethod def _normalize_keymap(cls, keymap: Keymap) -> Keymap: """Normalizes the keys in a keymap, so they use long form, i.e. "question_mark" rather than "?".""" return { binding_id: _normalize_key_list(keys) for binding_id, keys in keymap.items() } def set_keymap(self, keymap: Keymap) -> None: """Set the keymap, a mapping of binding IDs to key strings. Bindings in the keymap are used to override default key bindings, i.e. those defined in `BINDINGS` class variables. Bindings with IDs that are present in the keymap will have their key string replaced with the value from the keymap. Args: keymap: A mapping of binding IDs to key strings. 
""" self._keymap = self._normalize_keymap(keymap) self.refresh_bindings() def update_keymap(self, keymap: Keymap) -> None: """Update the App's keymap, merging with `keymap`. If a Binding ID exists in both the App's keymap and the `keymap` argument, the `keymap` argument takes precedence. Args: keymap: A mapping of binding IDs to key strings. """ self._keymap = {**self._keymap, **self._normalize_keymap(keymap)} self.refresh_bindings() def handle_bindings_clash( self, clashed_bindings: set[Binding], node: DOMNode ) -> None: """Handle a clash between bindings. Bindings clashes are likely due to users setting conflicting keys via their keymap. This method is intended to be overridden by subclasses. Textual will call this each time a clash is encountered - which may be on each keypress if a clashing widget is focused or is in the bindings chain. Args: clashed_bindings: The bindings that are clashing. node: The node that has the clashing bindings. """ pass async def on_event(self, event: events.Event) -> None: # Handle input events that haven't been forwarded # If the event has been forwarded it may have bubbled up back to the App if isinstance(event, events.Compose): await self._init_mode(self._current_mode) await super().on_event(event) elif isinstance(event, events.InputEvent) and not event.is_forwarded: if not self.app_focus and isinstance(event, (events.Key, events.MouseDown)): self.app_focus = True if isinstance(event, events.MouseEvent): # Record current mouse position on App self.mouse_position = Offset(event.x, event.y) if isinstance(event, events.MouseDown): try: self._mouse_down_widget, _ = self.get_widget_at( event.x, event.y ) except NoWidget: # Shouldn't occur, since at the very least this will find the Screen self._mouse_down_widget = None self.screen._forward_event(event) # If a MouseUp occurs at the same widget as a MouseDown, then we should # consider it a click, and produce a Click event. 
                if (
                    isinstance(event, events.MouseUp)
                    and self._mouse_down_widget is not None
                ):
                    try:
                        screen_offset = event.screen_offset
                        mouse_down_widget = self._mouse_down_widget
                        mouse_up_widget, _ = self.get_widget_at(*screen_offset)
                        if mouse_up_widget is mouse_down_widget:
                            # A click at the same offset within the time threshold
                            # extends a chain (double/triple click); otherwise reset.
                            same_offset = (
                                self._click_chain_last_offset is not None
                                and self._click_chain_last_offset == screen_offset
                            )
                            within_time_threshold = (
                                self._click_chain_last_time is not None
                                and event.time - self._click_chain_last_time
                                <= self.CLICK_CHAIN_TIME_THRESHOLD
                            )

                            if same_offset and within_time_threshold:
                                self._chained_clicks += 1
                            else:
                                self._chained_clicks = 1

                            click_event = events.Click.from_event(
                                mouse_down_widget, event, chain=self._chained_clicks
                            )

                            self._click_chain_last_time = event.time
                            self._click_chain_last_offset = screen_offset

                            self.screen._forward_event(click_event)
                    except NoWidget:
                        pass

            elif isinstance(event, events.Key):
                # Special case for maximized widgets
                # If something is maximized, then escape should minimize
                if (
                    self.screen.maximized is not None
                    and event.key == "escape"
                    and self.escape_to_minimize
                ):
                    self.screen.minimize()
                    return
                if self.focused:
                    try:
                        self.screen._clear_tooltip()
                    except NoScreen:
                        pass
                # Priority bindings get first refusal; otherwise forward to focus.
                if not await self._check_bindings(event.key, priority=True):
                    forward_target = self.focused or self.screen
                    forward_target._forward_event(event)
            else:
                self.screen._forward_event(event)

        elif isinstance(event, events.Paste) and not event.is_forwarded:
            if self.focused is not None:
                self.focused._forward_event(event)
            else:
                self.screen._forward_event(event)
        else:
            await super().on_event(event)

    @property
    def escape_to_minimize(self) -> bool:
        """Use the escape key to minimize?

        When a widget is [maximized][textual.screen.Screen.maximize], this boolean
        determines if the `escape` key will minimize the widget (potentially overriding any bindings).

        The default logic is to use the screen's `ESCAPE_TO_MINIMIZE` classvar if it is set to `True` or `False`.
        If the classvar on the screen is *not* set (and left as `None`), then the app's
        `ESCAPE_TO_MINIMIZE` is used.
        """
        return bool(
            self.ESCAPE_TO_MINIMIZE
            if self.screen.ESCAPE_TO_MINIMIZE is None
            else self.screen.ESCAPE_TO_MINIMIZE
        )

    def _parse_action(
        self,
        action: str | ActionParseResult,
        default_namespace: DOMNode,
        namespaces: Mapping[str, DOMNode] | None = None,
    ) -> tuple[DOMNode, str, tuple[object, ...]]:
        """Parse an action.

        Args:
            action: An action string.
            default_namespace: Namespace to use when none is supplied in the action.
            namespaces: Mapping of namespaces.

        Raises:
            ActionError: If there are any errors parsing the action string.

        Returns:
            A tuple of (node or None, action name, tuple of parameters).
        """
        if isinstance(action, tuple):
            # Already parsed into (destination, name, params).
            destination, action_name, params = action
        else:
            destination, action_name, params = actions.parse(action)

        # Resolve the destination namespace: explicit mapping first, then
        # attributes of the app listed in `_action_targets`.
        action_target: DOMNode | None = (
            None if namespaces is None else namespaces.get(destination)
        )
        if destination and action_target is None:
            if destination not in self._action_targets:
                raise ActionError(f"Action namespace {destination} is not known")
            action_target = getattr(self, destination, None)
            if action_target is None:
                raise ActionError(f"Action target {destination!r} not available")
        return (
            (default_namespace if action_target is None else action_target),
            action_name,
            params,
        )

    def _check_action_state(
        self, action: str, default_namespace: DOMNode
    ) -> bool | None:
        """Check if an action is enabled.

        Args:
            action: An action string.
            default_namespace: The default namespace if one is not specified in the action.

        Returns:
            State of an action.
        """
        action_target, action_name, parameters = self._parse_action(
            action, default_namespace
        )
        return action_target.check_action(action_name, parameters)

    async def run_action(
        self,
        action: str | ActionParseResult,
        default_namespace: DOMNode | None = None,
        namespaces: Mapping[str, DOMNode] | None = None,
    ) -> bool:
        """Perform an [action](/guide/actions).
        Actions are typically associated with key bindings, where you wouldn't need to call this method manually.

        Args:
            action: Action encoded in a string.
            default_namespace: Namespace to use if not provided in the action,
                or None to use app.
            namespaces: Mapping of namespaces.

        Returns:
            True if the event has been handled.
        """
        action_target, action_name, params = self._parse_action(
            action, self if default_namespace is None else default_namespace, namespaces
        )
        if action_target.check_action(action_name, params):
            return await self._dispatch_action(action_target, action_name, params)
        else:
            # Action is disabled for this target; report unhandled.
            return False

    async def _dispatch_action(
        self, namespace: DOMNode, action_name: str, params: Any
    ) -> bool:
        """Dispatch an action to an action method.

        Args:
            namespace: Namespace (object) of action.
            action_name: Name of the action.
            params: Action parameters.

        Returns:
            True if handled, otherwise False.
        """
        _rich_traceback_guard = True

        log.system(
            "<action>",
            namespace=namespace,
            action_name=action_name,
            params=params,
        )

        try:
            # Private handlers (`_action_*`) take precedence over public ones.
            private_method = getattr(namespace, f"_action_{action_name}", None)
            if callable(private_method):
                await invoke(private_method, *params)
                return True
            public_method = getattr(namespace, f"action_{action_name}", None)
            if callable(public_method):
                await invoke(public_method, *params)
                return True
            log.system(
                f"<action> {action_name!r} has no target."
                f" Could not find methods '_action_{action_name}' or 'action_{action_name}'"
            )
        except SkipAction:
            # The action method raised this to explicitly not handle the action
            log.system(f"<action> {action_name!r} skipped.")
        return False

    async def _broker_event(
        self, event_name: str, event: events.Event, default_namespace: DOMNode
    ) -> bool:
        """Allow the app an opportunity to dispatch events to action system.

        Args:
            event_name: Name of the event, used to look up handler actions
                in the event's style meta.
            event: An event object.
            default_namespace: The default namespace, where one isn't supplied.

        Returns:
            True if an action was processed.
""" try: style = getattr(event, "style") except AttributeError: return False try: _modifiers, action = extract_handler_actions(event_name, style.meta) except NoHandler: return False else: event.stop() if isinstance(action, str): await self.run_action(action, default_namespace) elif isinstance(action, tuple) and len(action) == 2: action_name, action_params = action namespace, parsed_action, _ = actions.parse(action_name) await self.run_action( (namespace, parsed_action, action_params), default_namespace, ) else: if isinstance(action, tuple) and self.debug: # It's a tuple and made it this far, which means it'll be a # malformed action. This is a no-op, but let's log that # anyway. log.warning( f"Can't parse @{event_name} action from style meta; check your console markup syntax" ) return False return True async def _on_update(self, message: messages.Update) -> None: message.stop() async def _on_layout(self, message: messages.Layout) -> None: message.stop() async def _on_key(self, event: events.Key) -> None: if not (await self._check_bindings(event.key)): await dispatch_key(self, event) async def _on_resize(self, event: events.Resize) -> None: event.stop() self._size = event.size self._resize_event = event async def _on_app_focus(self, event: events.AppFocus) -> None: """App has focus.""" # Required by textual-web to manage focus in a web page. self.app_focus = True self.screen.refresh_bindings() async def _on_app_blur(self, event: events.AppBlur) -> None: """App has lost focus.""" # Required by textual-web to manage focus in a web page. self.app_focus = False self.screen.refresh_bindings() def _prune(self, *nodes: Widget, parent: DOMNode | None = None) -> AwaitRemove: """Prune nodes from DOM. Args: parent: Parent node. Returns: Optional awaitable. 
""" if not nodes: return AwaitRemove([]) pruning_nodes: set[Widget] = {*nodes} for node in nodes: node.post_message(Prune()) pruning_nodes.update(node.walk_children(with_self=True)) try: screen = nodes[0].screen except (ScreenStackError, NoScreen): pass else: if screen.focused and screen.focused in pruning_nodes: screen._reset_focus(screen.focused, list(pruning_nodes)) for node in pruning_nodes: node._pruning = True def post_mount() -> None: """Called after removing children.""" if parent is not None: try: screen = parent.screen except (ScreenStackError, NoScreen): pass else: if screen._running: self._update_mouse_over(screen) finally: parent.refresh(layout=True) await_complete = AwaitRemove( [task for node in nodes if (task := node._task) is not None], post_mount, ) self.call_next(await_complete) return await_complete def _watch_app_focus(self, focus: bool) -> None: """Respond to changes in app focus.""" self.screen._update_styles() if focus: # If we've got a last-focused widget, if it still has a screen, # and if the screen is still the current screen and if nothing # is focused right now... try: if ( self._last_focused_on_app_blur is not None and self._last_focused_on_app_blur.screen is self.screen and self.screen.focused is None ): # ...settle focus back on that widget. # Don't scroll the newly focused widget, as this can be quite jarring self.screen.set_focus( self._last_focused_on_app_blur, scroll_visible=False, from_app_focus=True, ) except NoScreen: pass # Now that we have focus back on the app and we don't need the # widget reference any more, don't keep it hanging around here. self._last_focused_on_app_blur = None else: # Remember which widget has focus, when the app gets focus back # we'll want to try and focus it again. self._last_focused_on_app_blur = self.screen.focused # Remove focus for now. self.screen.set_focus(None) async def action_simulate_key(self, key: str) -> None: """An [action](/guide/actions) to simulate a key press. 
This will invoke the same actions as if the user had pressed the key. Args: key: The key to process. """ self.simulate_key(key) async def action_quit(self) -> None: """An [action](/guide/actions) to quit the app as soon as possible.""" self.exit() async def action_bell(self) -> None: """An [action](/guide/actions) to play the terminal 'bell'.""" self.bell() async def action_focus(self, widget_id: str) -> None: """An [action](/guide/actions) to focus the given widget. Args: widget_id: ID of widget to focus. """ try: node = self.query(f"#{widget_id}").first() except NoMatches: pass else: if isinstance(node, Widget): self.set_focus(node) async def action_switch_screen(self, screen: str) -> None: """An [action](/guide/actions) to switch screens. Args: screen: Name of the screen. """ self.switch_screen(screen) async def action_push_screen(self, screen: str) -> None: """An [action](/guide/actions) to push a new screen on to the stack and make it active. Args: screen: Name of the screen. """ self.push_screen(screen) async def action_pop_screen(self) -> None: """An [action](/guide/actions) to remove the topmost screen and makes the new topmost screen active.""" self.pop_screen() async def action_switch_mode(self, mode: str) -> None: """An [action](/guide/actions) that switches to the given mode.""" self.switch_mode(mode) async def action_back(self) -> None: """An [action](/guide/actions) to go back to the previous screen (pop the current screen). Note: If there is no screen to go back to, this is a non-operation (in other words it's safe to call even if there are no other screens on the stack.) """ try: self.pop_screen() except ScreenStackError: pass async def action_add_class(self, selector: str, class_name: str) -> None: """An [action](/guide/actions) to add a CSS class to the selected widget. Args: selector: Selects the widget to add the class to. class_name: The class to add to the selected widget. 
""" self.screen.query(selector).add_class(class_name) async def action_remove_class(self, selector: str, class_name: str) -> None: """An [action](/guide/actions) to remove a CSS class from the selected widget. Args: selector: Selects the widget to remove the class from. class_name: The class to remove from the selected widget.""" self.screen.query(selector).remove_class(class_name) async def action_toggle_class(self, selector: str, class_name: str) -> None: """An [action](/guide/actions) to toggle a CSS class on the selected widget. Args: selector: Selects the widget to toggle the class on. class_name: The class to toggle on the selected widget. """ self.screen.query(selector).toggle_class(class_name) def action_toggle_dark(self) -> None: """An [action](/guide/actions) to toggle the theme between textual-light and textual-dark. This is offered as a convenience to simplify backwards compatibility with previous versions of Textual which only had light mode and dark mode.""" self.theme = ( "textual-dark" if self.theme == "textual-light" else "textual-light" ) def action_focus_next(self) -> None: """An [action](/guide/actions) to focus the next widget.""" self.screen.focus_next() def action_focus_previous(self) -> None: """An [action](/guide/actions) to focus the previous widget.""" self.screen.focus_previous() def action_hide_help_panel(self) -> None: """Hide the keys panel (if present).""" self.screen.query("HelpPanel").remove() def action_show_help_panel(self) -> None: """Show the keys panel.""" from textual.widgets import HelpPanel try: self.screen.query_one(HelpPanel) except NoMatches: self.screen.mount(HelpPanel()) def action_notify( self, message: str, title: str = "", severity: str = "information" ) -> None: """Show a notification.""" self.notify(message, title=title, severity=severity) def _on_terminal_supports_synchronized_output( self, message: messages.TerminalSupportsSynchronizedOutput ) -> None: log.system("SynchronizedOutput mode is supported") if 
self._driver is not None and not self._driver.is_inline: self._sync_available = True def _begin_update(self) -> None: if self._sync_available and self._driver is not None: self._driver.write(SYNC_START) def _end_update(self) -> None: if self._sync_available and self._driver is not None: self._driver.write(SYNC_END) def _refresh_notifications(self) -> None: """Refresh the notifications on the current screen, if one is available.""" # If we've got a screen to hand... try: screen = self.screen except ScreenStackError: pass else: try: # ...see if it has a toast rack. toast_rack = screen.get_child_by_type(ToastRack) except NoMatches: # It doesn't. That's fine. Either there won't ever be one, # or one will turn up. Things will work out later. return # Update the toast rack. self.call_later(toast_rack.show, self._notifications) def clear_selection(self) -> None: """Clear text selection on the active screen.""" try: self.screen.clear_selection() except NoScreen: pass def notify( self, message: str, *, title: str = "", severity: SeverityLevel = "information", timeout: float | None = None, markup: bool = True, ) -> None: """Create a notification. !!! tip This method is thread-safe. Args: message: The message for the notification. title: The title for the notification. severity: The severity of the notification. timeout: The timeout (in seconds) for the notification, or `None` for default. markup: Render the message as content markup? The `notify` method is used to create an application-wide notification, shown in a [`Toast`][textual.widgets._toast.Toast], normally originating in the bottom right corner of the display. Notifications can have the following severity levels: - `information` - `warning` - `error` The default is `information`. Example: ```python # Show an information notification. self.notify("It's an older code, sir, but it checks out.") # Show a warning. Note that Textual's notification system allows # for the use of Rich console markup. 
self.notify( "Now witness the firepower of this fully " "[b]ARMED[/b] and [i][b]OPERATIONAL[/b][/i] battle station!", title="Possible trap detected", severity="warning", ) # Show an error. Set a longer timeout so it's noticed. self.notify("It's a trap!", severity="error", timeout=10) # Show an information notification, but without any sort of title. self.notify("It's against my programming to impersonate a deity.", title="") ``` """ if timeout is None: timeout = self.NOTIFICATION_TIMEOUT notification = Notification(message, title, severity, timeout, markup=markup) self.post_message(Notify(notification)) def _on_notify(self, event: Notify) -> None: """Handle notification message.""" self._notifications.add(event.notification) self._refresh_notifications() def _unnotify(self, notification: Notification, refresh: bool = True) -> None: """Remove a notification from the notification collection. Args: notification: The notification to remove. refresh: Flag to say if the display of notifications should be refreshed. """ del self._notifications[notification] if refresh: self._refresh_notifications() def clear_notifications(self) -> None: """Clear all the current notifications.""" self._notifications.clear() self._refresh_notifications() def action_command_palette(self) -> None: """Show the Textual command palette.""" if self.use_command_palette and not CommandPalette.is_open(self): self.push_screen(CommandPalette(id="--command-palette")) def _suspend_signal(self) -> None: """Signal that the application is being suspended.""" self.app_suspend_signal.publish(self) @on(Driver.SignalResume) def _resume_signal(self) -> None: """Signal that the application is being resumed from a suspension.""" self.app_resume_signal.publish(self) @contextmanager def suspend(self) -> Iterator[None]: """A context manager that temporarily suspends the app. While inside the `with` block, the app will stop reading input and emitting output. 
Other applications will have full control of the terminal, configured as it was before the app started running. When the `with` block ends, the application will start reading input and emitting output again. Example: ```python with self.suspend(): os.system("emacs -nw") ``` Raises: SuspendNotSupported: If the environment doesn't support suspending. !!! note Suspending the application is currently only supported on Unix-like operating systems and Microsoft Windows. Suspending is not supported in Textual Web. """ if self._driver is None: return if self._driver.can_suspend: # Publish a suspend signal *before* we suspend application mode. self._suspend_signal() self._driver.suspend_application_mode() # We're going to handle the start of the driver again so mark # this next part as such; the reason for this is that the code # the developer may be running could be in this process, and on # Unix-like systems the user may `action_suspend_process` the # app, and we don't want to have the driver auto-restart # application mode when the application comes back to the # foreground, in this context. with ( self._driver.no_automatic_restart(), redirect_stdout(sys.__stdout__), redirect_stderr(sys.__stderr__), ): yield # We're done with the dev's code so resume application mode. self._driver.resume_application_mode() # ...and publish a resume signal. self._resume_signal() self.refresh(layout=True) else: raise SuspendNotSupported( "App.suspend is not supported in this environment." ) def action_suspend_process(self) -> None: """Suspend the process into the background. Note: On Unix and Unix-like systems a `SIGTSTP` is sent to the application's process. Currently on Windows and when running under Textual Web this is a non-operation. """ # Check if we're in an environment that permits this kind of # suspend. if not WINDOWS and self._driver is not None and self._driver.can_suspend: # First, ensure that the suspend signal gets published while # we're still in application mode. 
self._suspend_signal() # With that out of the way, send the SIGTSTP signal. os.kill(os.getpid(), signal.SIGTSTP) # NOTE: There is no call to publish the resume signal here, this # will be handled by the driver posting a SignalResume event # (see the event handler on App._resume_signal) above. def open_url(self, url: str, *, new_tab: bool = True) -> None: """Open a URL in the default web browser. Args: url: The URL to open. new_tab: Whether to open the URL in a new tab. """ if self._driver is not None: self._driver.open_url(url, new_tab) def deliver_text( self, path_or_file: str | Path | TextIO, *, save_directory: str | Path | None = None, save_filename: str | None = None, open_method: Literal["browser", "download"] = "download", encoding: str | None = None, mime_type: str | None = None, name: str | None = None, ) -> str | None: """Deliver a text file to the end-user of the application. If a TextIO object is supplied, it will be closed by this method and *must not be used* after this method is called. If running in a terminal, this will save the file to the user's downloads directory. If running via a web browser, this will initiate a download via a single-use URL. After the file has been delivered, a `DeliveryComplete` message will be posted to this `App`, which contains the `delivery_key` returned by this method. By handling this message, you can add custom logic to your application that fires only after the file has been delivered. Args: path_or_file: The path or file-like object to save. save_directory: The directory to save the file to. save_filename: The filename to save the file to. If `path_or_file` is a file-like object, the filename will be generated from the `name` attribute if available. If `path_or_file` is a path the filename will be generated from the path. encoding: The encoding to use when saving the file. If `None`, the encoding will be determined by supplied file-like object (if possible). If this is not possible, 'utf-8' will be used. 
mime_type: The MIME type of the file or None to guess based on file extension. If no MIME type is supplied and we cannot guess the MIME type, from the file extension, the MIME type will be set to "text/plain". name: A user-defined named which will be returned in [`DeliveryComplete`][textual.events.DeliveryComplete] and [`DeliveryComplete`][textual.events.DeliveryComplete]. Returns: The delivery key that uniquely identifies the file delivery. """ # Ensure `path_or_file` is a file-like object - convert if needed. if isinstance(path_or_file, (str, Path)): binary_path = Path(path_or_file) binary = binary_path.open("rb") file_name = save_filename or binary_path.name else: encoding = encoding or getattr(path_or_file, "encoding", None) or "utf-8" binary = path_or_file file_name = save_filename or getattr(path_or_file, "name", None) # If we could infer a filename, and no MIME type was supplied, guess the MIME type. if file_name and not mime_type: mime_type, _ = mimetypes.guess_type(file_name) # Still no MIME type? Default it to "text/plain". if mime_type is None: mime_type = "text/plain" return self._deliver_binary( binary, save_directory=save_directory, save_filename=file_name, open_method=open_method, encoding=encoding, mime_type=mime_type, name=name, ) def deliver_binary( self, path_or_file: str | Path | BinaryIO, *, save_directory: str | Path | None = None, save_filename: str | None = None, open_method: Literal["browser", "download"] = "download", mime_type: str | None = None, name: str | None = None, ) -> str | None: """Deliver a binary file to the end-user of the application. If an IO object is supplied, it will be closed by this method and *must not be used* after it is supplied to this method. If running in a terminal, this will save the file to the user's downloads directory. If running via a web browser, this will initiate a download via a single-use URL. 
This operation runs in a thread when running on web, so this method returning does not indicate that the file has been delivered. After the file has been delivered, a `DeliveryComplete` message will be posted to this `App`, which contains the `delivery_key` returned by this method. By handling this message, you can add custom logic to your application that fires only after the file has been delivered. Args: path_or_file: The path or file-like object to save. save_directory: The directory to save the file to. If None, the default "downloads" directory will be used. This argument is ignored when running via the web. save_filename: The filename to save the file to. If None, the following logic applies to generate the filename: - If `path_or_file` is a file-like object, the filename will be taken from the `name` attribute if available. - If `path_or_file` is a path, the filename will be taken from the path. - If a filename is not available, a filename will be generated using the App's title and the current date and time. open_method: The method to use to open the file. "browser" will open the file in the web browser, "download" will initiate a download. Note that this can sometimes be impacted by the browser's settings. mime_type: The MIME type of the file or None to guess based on file extension. If no MIME type is supplied and we cannot guess the MIME type, from the file extension, the MIME type will be set to "application/octet-stream". name: A user-defined named which will be returned in [`DeliveryComplete`][textual.events.DeliveryComplete] and [`DeliveryComplete`][textual.events.DeliveryComplete]. Returns: The delivery key that uniquely identifies the file delivery. """ # Ensure `path_or_file` is a file-like object - convert if needed. 
if isinstance(path_or_file, (str, Path)): binary_path = Path(path_or_file) binary = binary_path.open("rb") file_name = save_filename or binary_path.name else: # IO object binary = path_or_file file_name = save_filename or getattr(path_or_file, "name", None) # If we could infer a filename, and no MIME type was supplied, guess the MIME type. if file_name and not mime_type: mime_type, _ = mimetypes.guess_type(file_name) # Still no MIME type? Default it to "application/octet-stream". if mime_type is None: mime_type = "application/octet-stream" return self._deliver_binary( binary, save_directory=save_directory, save_filename=file_name, open_method=open_method, mime_type=mime_type, encoding=None, name=name, ) def _deliver_binary( self, binary: BinaryIO | TextIO, *, save_directory: str | Path | None, save_filename: str | None, open_method: Literal["browser", "download"], encoding: str | None = None, mime_type: str | None = None, name: str | None = None, ) -> str | None: """Deliver a binary file to the end-user of the application.""" if self._driver is None: return None # Generate a filename if the file-like object doesn't have one. if save_filename is None: save_filename = generate_datetime_filename(self.title, "") # Find the appropriate save location if not specified. save_directory = ( user_downloads_path() if save_directory is None else Path(save_directory) ) # Generate a unique key for this delivery delivery_key = str(uuid.uuid4().hex) # Save the file. The driver will determine the appropriate action # to take here. It could mean simply writing to the save_path, or # sending the file to the web browser for download. 
self._driver.deliver_binary( binary, delivery_key=delivery_key, save_path=save_directory / save_filename, encoding=encoding, open_method=open_method, mime_type=mime_type, name=name, ) return delivery_key @on(events.DeliveryComplete) def _on_delivery_complete(self, event: events.DeliveryComplete) -> None: """Handle a successfully delivered screenshot.""" if event.name == "screenshot": if event.path is None: self.notify("Saved screenshot", title="Screenshot") else: self.notify( f"Saved screenshot to [$text-success]{str(event.path)!r}", title="Screenshot", ) @on(events.DeliveryFailed) def _on_delivery_failed(self, event: events.DeliveryComplete) -> None: """Handle a failure to deliver the screenshot.""" if event.name == "screenshot": self.notify( "Failed to save screenshot", title="Screenshot", severity="error" ) @on(messages.InBandWindowResize) def _on_in_band_window_resize(self, message: messages.InBandWindowResize) -> None: """In band window resize enables smooth scrolling.""" self.supports_smooth_scrolling = message.enabled self.log.debug(message) def _on_idle(self) -> None: """Send app resize events on idle, so we don't do more resizing that necessary.""" event = self._resize_event if event is not None: self._resize_event = None self.screen.post_message(event) for screen in self._background_screens: screen.post_message(event)
App
python
sphinx-doc__sphinx
tests/roots/test-ext-autodoc/target/typehints.py
{ "start": 1514, "end": 1872 }
class ____(metaclass=_MetaclassWithCall): pass def complex_func(arg1, arg2, arg3=None, *args, **kwargs): # type: (str, List[int], Tuple[int, Union[str, Unknown]], *str, **str) -> None pass def missing_attr( c, a, # type: str b=None, # type: Optional[str] ): # type: (...) -> str return a + (b or '')
SignatureFromMetaclass
python
django__django
django/contrib/auth/views.py
{ "start": 7791, "end": 8983 }
class ____(PasswordContextMixin, FormView): email_template_name = "registration/password_reset_email.html" extra_email_context = None form_class = PasswordResetForm from_email = None html_email_template_name = None subject_template_name = "registration/password_reset_subject.txt" success_url = reverse_lazy("password_reset_done") template_name = "registration/password_reset_form.html" title = _("Password reset") token_generator = default_token_generator def form_valid(self, form): opts = { "use_https": self.request.is_secure(), "token_generator": self.token_generator, "from_email": self.from_email, "email_template_name": self.email_template_name, "subject_template_name": self.subject_template_name, "request": self.request, "html_email_template_name": self.html_email_template_name, "extra_email_context": self.extra_email_context, } form.save(**opts) return super().form_valid(form) INTERNAL_RESET_SESSION_TOKEN = "_password_reset_token" @method_decorator(login_not_required, name="dispatch")
PasswordResetView
python
huggingface__transformers
tests/models/marian/test_modeling_marian.py
{ "start": 21841, "end": 22748 }
class ____(MarianIntegrationTest): """Multilingual on target side.""" src = "en" tgt = "ROMANCE" src_text = [ ">>fr<< Don't spend so much time watching TV.", ">>pt<< Your message has been sent.", ">>es<< He's two years older than me.", ] expected_text = [ "Ne passez pas autant de temps à regarder la télé.", "A sua mensagem foi enviada.", "Es dos años más viejo que yo.", ] @slow def test_batch_generation_en_ROMANCE_multi(self): self._assert_generated_batch_equal_expected() @slow @require_torch def test_pipeline(self): pipeline = TranslationPipeline(self.model, self.tokenizer, device=torch_device) output = pipeline(self.src_text) self.assertEqual(self.expected_text, [x["translation_text"] for x in output]) @require_sentencepiece @require_tokenizers
TestMarian_en_ROMANCE
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 185918, "end": 186455 }
class ____(sgqlc.types.Input): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("pull_request_review_comment_id", "body", "client_mutation_id") pull_request_review_comment_id = sgqlc.types.Field( sgqlc.types.non_null(ID), graphql_name="pullRequestReviewCommentId" ) body = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="body") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
UpdatePullRequestReviewCommentInput
python
django__django
django/utils/datastructures.py
{ "start": 8376, "end": 10290 }
class ____(Mapping): """ Mapping allowing case-insensitive key lookups. Original case of keys is preserved for iteration and string representation. Example:: >>> ci_map = CaseInsensitiveMapping({'name': 'Jane'}) >>> ci_map['Name'] Jane >>> ci_map['NAME'] Jane >>> ci_map['name'] Jane >>> ci_map # original case preserved {'name': 'Jane'} """ def __init__(self, data): self._store = {k.lower(): (k, v) for k, v in self._unpack_items(data)} def __getitem__(self, key): return self._store[key.lower()][1] def __len__(self): return len(self._store) def __eq__(self, other): return isinstance(other, Mapping) and { k.lower(): v for k, v in self.items() } == {k.lower(): v for k, v in other.items()} def __iter__(self): return (original_key for original_key, value in self._store.values()) def __repr__(self): return repr({key: value for key, value in self._store.values()}) def copy(self): return self @staticmethod def _unpack_items(data): # Explicitly test for dict first as the common case for performance, # avoiding abc's __instancecheck__ and _abc_instancecheck for the # general Mapping case. if isinstance(data, (dict, Mapping)): yield from data.items() return for i, elem in enumerate(data): if len(elem) != 2: raise ValueError( "dictionary update sequence element #{} has length {}; " "2 is required.".format(i, len(elem)) ) if not isinstance(elem[0], str): raise ValueError( "Element key %r invalid, only strings are allowed" % elem[0] ) yield elem
CaseInsensitiveMapping
python
py-pdf__pypdf
pypdf/constants.py
{ "start": 9971, "end": 10193 }
class ____: S = "/S" # name, required: type of action D = "/D" # name, byte string, or array, required: destination to jump to SD = "/SD" # array, optional: structure destination to jump to
GoToActionArguments
python
apache__airflow
providers/microsoft/azure/src/airflow/providers/microsoft/azure/operators/powerbi.py
{ "start": 1419, "end": 1865 }
class ____(BaseOperatorLink): """Construct a link to monitor a dataset in Power BI.""" name = "Monitor PowerBI Dataset" def get_link(self, operator: BaseOperator, *, ti_key: TaskInstanceKey): url = ( "https://app.powerbi.com" f"/groups/{operator.group_id}/datasets/{operator.dataset_id}" # type: ignore[attr-defined] "/details?experience=power-bi" ) return url
PowerBILink
python
tensorflow__tensorflow
tensorflow/python/data/kernel_tests/dataset_test.py
{ "start": 2075, "end": 25170 }
class ____(test_base.DatasetTestBase, parameterized.TestCase): @combinations.generate(test_base.default_test_combinations()) def testAsSerializedGraph(self): dataset = dataset_ops.Dataset.range(10) graph = graph_pb2.GraphDef().FromString( self.evaluate(dataset._as_serialized_graph())) self.assertTrue(any(node.op == "RangeDataset" for node in graph.node)) def testAsSerializedGraphStateful(self): dataset = dataset_ops.Dataset.range(10).map( lambda _: random_ops.random_uniform(())) with self.assertRaises(errors.FailedPreconditionError): self.evaluate( dataset._as_serialized_graph(external_state_policy=options_lib .ExternalStatePolicy.FAIL)) @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine( init_source=["textfile", "keyvaluetensor", "dataset"]))) def testLookupTableGraphSerialization(self, init_source): vals = [10, 11] initializer = self.lookupTableInitializer(init_source, vals) table = lookup_ops.StaticHashTable(initializer, -1) dataset = dataset_ops.Dataset.range(3) dataset = dataset.map(table.lookup) self.evaluate(lookup_ops.tables_initializer()) round_tripped = self.graphRoundTrip(dataset) del table del dataset self.assertDatasetProduces( round_tripped, [10, 11, -1], requires_initialization=True) @combinations.generate(test_base.eager_only_combinations()) def testAsFunctionWithMap(self): with ops.device("CPU"): original_dataset = dataset_ops.Dataset.range(5).map(lambda x: x * 2) fn = original_dataset._trace_variant_creation() variant = fn() revived_dataset = dataset_ops._VariantDataset( variant, original_dataset.element_spec) self.assertDatasetProduces(revived_dataset, range(0, 10, 2)) @combinations.generate(test_base.eager_only_combinations()) def testAsFunctionWithMapInFlatMap(self): with ops.device("CPU"): original_dataset = dataset_ops.Dataset.range(5).flat_map( lambda x: dataset_ops.Dataset.range(5).map(lambda x: x * 2)) fn = original_dataset._trace_variant_creation() variant = fn() revived_dataset = 
dataset_ops._VariantDataset( variant, original_dataset.element_spec) self.assertDatasetProduces(revived_dataset, list(original_dataset)) @combinations.generate(test_base.eager_only_combinations()) def testAsFunctionFromReader(self): with ops.device("CPU"): file_path = os.path.join(self.get_temp_dir(), "{}.tfrecord.gz".format("tf_record_asset")) with tf_record.TFRecordWriter(file_path, "GZIP") as f: for v in ["a", "aa", "aaa"]: f.write(str(v)) original_dataset = readers.TFRecordDataset([file_path], compression_type="GZIP") fn = original_dataset._trace_variant_creation() variant = fn() revived_dataset = dataset_ops._VariantDataset( variant, original_dataset.element_spec) self.assertDatasetProduces(revived_dataset, ["a", "aa", "aaa"]) def _testNumInputs(self, dataset, num_inputs): self.assertLen(dataset._inputs(), num_inputs) @combinations.generate(test_base.default_test_combinations()) def testFixedLengthRecordInputs(self): dataset = readers.FixedLengthRecordDataset("", 42) self._testNumInputs(dataset, 0) @combinations.generate(test_base.default_test_combinations()) def testFromGeneratorInputs(self): def gen(): yield 42 dataset = dataset_ops.Dataset.from_generator(gen, dtypes.int32) self._testNumInputs(dataset, 1) @combinations.generate(test_base.default_test_combinations()) def testFromTensorsInputs(self): dataset = dataset_ops.Dataset.from_tensors([42]) self._testNumInputs(dataset, 0) @combinations.generate(test_base.default_test_combinations()) def testRangeInputs(self): dataset = dataset_ops.Dataset.range(10) self._testNumInputs(dataset, 0) @combinations.generate(test_base.default_test_combinations()) def testTextLineInputs(self): dataset = readers.TextLineDataset("") self._testNumInputs(dataset, 0) @combinations.generate(test_base.default_test_combinations()) def testTFRecordInputs(self): dataset = readers.TFRecordDataset("") self._testNumInputs(dataset, 1) @combinations.generate( combinations.combine(tf_api_version=1, mode=["eager", "graph"])) def 
testDatasetComplexSourceInputs(self): dataset_fn = dataset_ops.Dataset.from_sparse_tensor_slices( sparse_tensor.SparseTensor( indices=np.array([[0, 0], [1, 0], [2, 0]]), values=np.array([0, 0, 0]), dense_shape=np.array([3, 1]))) self.assertEmpty(dataset_fn._inputs()) def _testUnaryInputs(self, dataset_fn): input_dataset = dataset_ops.Dataset.range(0) self.assertEqual([input_dataset], dataset_fn(input_dataset)._inputs()) @combinations.generate(test_base.default_test_combinations()) def testBatchInputs(self): self._testUnaryInputs(lambda x: x.batch(10)) @combinations.generate(test_base.default_test_combinations()) def testCacheInputs(self): self._testUnaryInputs(lambda x: x.cache()) @combinations.generate(test_base.default_test_combinations()) def testFilterInputs(self): self._testUnaryInputs(lambda x: x.filter(lambda x: True)) @combinations.generate(test_base.default_test_combinations()) def testFlatMapInputs(self): self._testUnaryInputs( lambda x: x.flat_map(lambda x: dataset_ops.Dataset.range(0))) @combinations.generate(test_base.default_test_combinations()) def testMapInputs(self): self._testUnaryInputs(lambda x: x.map(lambda x: x)) @combinations.generate(test_base.default_test_combinations()) def testPaddedBatchInputs(self): self._testUnaryInputs(lambda x: x.padded_batch(10, [])) @combinations.generate(test_base.default_test_combinations()) def testParallelMapInputs(self): self._testUnaryInputs(lambda x: x.map(lambda x: x, num_parallel_calls=2)) @combinations.generate(test_base.default_test_combinations()) def testRepeatInputs(self): self._testUnaryInputs(lambda x: x.repeat()) @combinations.generate(test_base.default_test_combinations()) def testShuffleInputs(self): self._testUnaryInputs(lambda x: x.shuffle(10)) @combinations.generate(test_base.default_test_combinations()) def testSkipInputs(self): self._testUnaryInputs(lambda x: x.skip(1)) @combinations.generate(test_base.default_test_combinations()) def testTakeInputs(self): self._testUnaryInputs(lambda x: 
x.take(1)) @combinations.generate(test_base.default_test_combinations()) def testWindowInputs(self): self._testUnaryInputs(lambda x: x.window(10)) @combinations.generate(test_base.default_test_combinations()) def testUnaryTransformationInputsApply(self): input_dataset = dataset_ops.Dataset.range(0) dataset = input_dataset.apply(lambda dataset: dataset.cache()) self.assertEqual([input_dataset], dataset._inputs()) def _testInputsWithInterleaveFn(self, dataset_fn, interleave_parallelism): input_dataset = dataset_ops.Dataset.range(0) dataset = input_dataset.interleave( lambda x: dataset_ops.Dataset.range(0), cycle_length=2, num_parallel_calls=interleave_parallelism) self.assertEqual([input_dataset], dataset._inputs()) @combinations.generate(test_base.default_test_combinations()) def testParallelInterleaveInputs(self): self._testInputsWithInterleaveFn(lambda: dataset_ops.range(0), 2) @combinations.generate(test_base.default_test_combinations()) def testInterleaveInputs(self): self._testInputsWithInterleaveFn(lambda: dataset_ops.range(0), None) @combinations.generate(test_base.default_test_combinations()) def testDebugString(self): dataset = dataset_ops.Dataset.range(10) dataset = dataset.map(lambda x: x**2) dataset = dataset.filter(lambda x: x > 10) debug_string = dataset.__debug_string__() for transformation in ["Range", "Map", "Filter"]: self.assertIn(transformation, debug_string) @combinations.generate(test_base.default_test_combinations()) def testNoWarnings(self): with test.mock.patch.object(warnings, "warn") as mock_log: dataset_ops.Dataset.range(0).interleave( lambda x: dataset_ops.Dataset.range(0), cycle_length=2) self.assertEmpty(mock_log.call_args_list) def _testBinaryInputs(self, dataset_fn): input1 = dataset_ops.Dataset.range(0) input2 = dataset_ops.Dataset.range(1) self.assertEqual([input1, input2], dataset_fn(input1, input2)._inputs()) @combinations.generate(test_base.default_test_combinations()) def testConcatenateInputs(self): 
self._testBinaryInputs(lambda x, y: x.concatenate(y)) def _testVariadicInputs(self, dataset_fn, input_datasets): self.assertEqual( nest.flatten(input_datasets), dataset_fn(input_datasets)._inputs()) @combinations.generate(test_base.default_test_combinations()) def testZipOneInputs(self): input_datasets = dataset_ops.Dataset.range(0) self._testVariadicInputs(dataset_ops.Dataset.zip, input_datasets) @combinations.generate(test_base.default_test_combinations()) def testZipNestInputs(self): input_datasets = (dataset_ops.Dataset.range(0), (dataset_ops.Dataset.range(1), dataset_ops.Dataset.range(2))) self._testVariadicInputs(dataset_ops.Dataset.zip, input_datasets) @combinations.generate(test_base.default_test_combinations()) def testZipTupleInputs(self): input_datasets = (dataset_ops.Dataset.range(0), dataset_ops.Dataset.range(1)) self._testVariadicInputs(dataset_ops.Dataset.zip, input_datasets) @combinations.generate(test_base.default_test_combinations()) def testFunctions(self): dataset = dataset_ops.Dataset.range(5).map(lambda x: x * 2) self.assertLen(dataset._functions(), 1) @combinations.generate(test_base.default_test_combinations()) def testCollectInputs(self): ds1 = dataset_ops.Dataset.range(0) ds2 = ds1.concatenate(ds1) ds3 = dataset_ops.Dataset.zip((ds2, ds1, ds2)) inputs = [] queue = [ds3] while queue: ds = queue[0] queue = queue[1:] queue.extend(ds._inputs()) inputs.append(ds) self.assertEqual(5, inputs.count(ds1)) self.assertEqual(2, inputs.count(ds2)) self.assertEqual(1, inputs.count(ds3)) def _testDatasetSpec(self, tf_value, expected_element_structure): dataset = dataset_ops.Dataset.from_tensors(0).map(lambda _: tf_value) dataset_structure = structure.type_spec_from_value(dataset) self.assertIsInstance(dataset_structure, dataset_ops.DatasetSpec) self.assertTrue( structure.are_compatible( dataset_ops.get_structure(dataset), expected_element_structure)) self.assertEqual([dtypes.variant], structure.get_flat_tensor_types(dataset_structure)) 
self.assertEqual([tensor_shape.TensorShape([])], structure.get_flat_tensor_shapes(dataset_structure)) # Assert that the `Dataset` survives a round-trip via _from_tensor_list() # and _to_tensor_list(). round_trip_dataset = dataset_structure._from_tensor_list( dataset_structure._to_tensor_list(dataset)) value = tf_value if isinstance(value, dataset_ops.Dataset): self.assertDatasetsEqual(value, dataset.flat_map(lambda x: x)) elif isinstance(value, optional_ops.Optional): self.assertDatasetProduces( round_trip_dataset.map(lambda opt: opt.get_value()), [self.evaluate(value.get_value())], requires_initialization=True) else: self.assertDatasetProduces( round_trip_dataset, [self.evaluate(tf_value)], requires_initialization=True) @combinations.generate(test_base.default_test_combinations()) def testTensorDatasetSpec(self): self._testDatasetSpec( constant_op.constant(37.0), tensor_spec.TensorSpec([], dtypes.float32)) @combinations.generate(test_base.default_test_combinations()) def testSparseTensorDatasetSpec(self): self._testDatasetSpec( sparse_tensor.SparseTensor( indices=[[0]], values=constant_op.constant([0], dtype=dtypes.int32), dense_shape=[1]), sparse_tensor.SparseTensorSpec([1], dtypes.int32)) @combinations.generate(test_base.default_test_combinations()) def testNestDatasetSpec(self): self._testDatasetSpec( { "a": constant_op.constant(37.0), "b": (constant_op.constant(["Foo"]), constant_op.constant("Bar")) }, { "a": tensor_spec.TensorSpec([], dtypes.float32), "b": ( tensor_spec.TensorSpec([1], dtypes.string), tensor_spec.TensorSpec([], dtypes.string), ) }) @combinations.generate(test_base.default_test_combinations()) def testDatasetDatasetSpec(self): self._testDatasetSpec( dataset_ops.Dataset.from_tensor_slices( constant_op.constant([1, 2, 3])), dataset_ops.DatasetSpec(tensor_spec.TensorSpec([], dtypes.int32))) @combinations.generate(test_base.default_test_combinations()) def testOptionalDatasetSpec(self): self._testDatasetSpec( 
optional_ops.Optional.from_value(37.0), optional_ops.OptionalSpec(tensor_spec.TensorSpec([], dtypes.float32))) @combinations.generate(test_base.graph_only_combinations()) def testSameGraphError(self): dataset = dataset_ops.Dataset.range(10) with ops.Graph().as_default(): with self.assertRaisesRegex(ValueError, "must be from the same graph"): dataset = dataset.batch(2) @combinations.generate( combinations.combine(tf_api_version=[1], mode=["graph"])) def testSameGraphErrorOneShot(self): dataset = dataset_ops.Dataset.range(10) with ops.Graph().as_default(): with self.assertRaisesRegex(ValueError, "Make sure that the dataset is created in " "the same graph as the iterator"): _ = dataset_ops.make_one_shot_iterator(dataset) @combinations.generate( combinations.combine(tf_api_version=[1], mode=["graph"])) def testSameGraphErrorInitializable(self): dataset = dataset_ops.Dataset.range(10) with ops.Graph().as_default(): with self.assertRaisesRegex(ValueError, "Make sure that the dataset is created in " "the same graph as the iterator"): _ = dataset_ops.make_initializable_iterator(dataset) @combinations.generate( combinations.times( test_base.eager_only_combinations(), combinations.combine(execution_mode=[context.ASYNC, context.SYNC]))) def testEagerIteration(self, execution_mode): with context.execution_mode(execution_mode): val = 0 dataset = dataset_ops.Dataset.range(10) for foo in dataset: self.assertEqual(val, foo.numpy()) val += 1 @combinations.generate(test_base.default_test_combinations()) def testDatasetAsFunctionArgument(self): @def_function.function def _uses_dataset(d): accumulator = array_ops.zeros([], dtype=dtypes.int64) for value in d: accumulator += value return accumulator with ops.device("CPU"): first_dataset = dataset_ops.Dataset.range(10) self.assertEqual(45, self.evaluate(_uses_dataset(first_dataset))) second_dataset = dataset_ops.Dataset.range(11) self.assertEqual(55, self.evaluate(_uses_dataset(second_dataset))) first_concrete = 
_uses_dataset.get_concrete_function(first_dataset) # The dataset should not be a captured input self.assertEmpty(first_concrete.graph.captures) # The two datasets have the same structure and so should re-use a trace. self.assertIs(first_concrete, _uses_dataset.get_concrete_function(second_dataset)) # With a different structure we should use a different trace. self.assertIsNot( first_concrete, _uses_dataset.get_concrete_function( dataset_ops.Dataset.zip((first_dataset, second_dataset)))) @combinations.generate(test_base.default_test_combinations()) def testLimitedRetracing(self): trace_count = [0] @def_function.function def f(ds): trace_count[0] += 1 counter = np.int64(0) for elem in ds: counter += elem return counter dataset = dataset_ops.Dataset.range(5) dataset2 = dataset_ops.Dataset.range(10) for _ in range(10): self.assertEqual(self.evaluate(f(dataset)), 10) self.assertEqual(self.evaluate(f(dataset2)), 45) self.assertEqual(trace_count[0], 1) # pylint: disable=g-long-lambda,unnecessary-lambda @combinations.generate(test_base.default_test_combinations()) def testLegacyStructureAPI(self): components = (np.array([1, 2, 3], dtype=np.int64), (np.array([4., 5.]), np.array([6., 7.])), np.array([8, 9, 10], dtype=np.int64)) dataset = dataset_ops.Dataset.from_tensors(components) self.assertEqual( (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64), dataset_ops.get_legacy_output_types(dataset)) self.assertEqual(([3], ([2], [2]), [3]), dataset_ops.get_legacy_output_shapes(dataset)) dataset = dataset.shuffle(10, 10) self.assertEqual( (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64), dataset_ops.get_legacy_output_types(dataset)) self.assertEqual(([3], ([2], [2]), [3]), dataset_ops.get_legacy_output_shapes(dataset)) dataset = dataset.repeat(-1) self.assertEqual( (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64), dataset_ops.get_legacy_output_types(dataset)) self.assertEqual(([3], ([2], [2]), [3]), 
dataset_ops.get_legacy_output_shapes(dataset)) dataset = dataset.filter(lambda x, y, z: True) self.assertEqual( (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64), dataset_ops.get_legacy_output_types(dataset)) self.assertEqual(([3], ([2], [2]), [3]), dataset_ops.get_legacy_output_shapes(dataset)) dataset = dataset.take(5) self.assertEqual( (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64), dataset_ops.get_legacy_output_types(dataset)) self.assertEqual(([3], ([2], [2]), [3]), dataset_ops.get_legacy_output_shapes(dataset)) dataset = dataset.map(lambda x, y, z: ((x, z), (y[0], y[1]))) self.assertEqual( ((dtypes.int64, dtypes.int64), (dtypes.float64, dtypes.float64)), dataset_ops.get_legacy_output_types(dataset)) self.assertEqual((([3], [3]), ([2], [2])), dataset_ops.get_legacy_output_shapes(dataset)) dataset = dataset.flat_map(lambda x, y: dataset_ops.Dataset.from_tensors( ((x[0], x[1]), (y[0], y[1])))) self.assertEqual( ((dtypes.int64, dtypes.int64), (dtypes.float64, dtypes.float64)), dataset_ops.get_legacy_output_types(dataset)) self.assertEqual((([3], [3]), ([2], [2])), dataset_ops.get_legacy_output_shapes(dataset)) dataset = dataset.batch(32) self.assertEqual( ((dtypes.int64, dtypes.int64), (dtypes.float64, dtypes.float64)), dataset_ops.get_legacy_output_types(dataset)) dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset) self.assertEqual( (([None, 3], [None, 3]), ([None, 2], [None, 2])), nest.pack_sequence_as( dataset_output_shapes, [s.as_list() for s in nest.flatten(dataset_output_shapes)])) # Define a separate set of components with matching leading # dimension for the from-slices constructor. 
components_for_slices = (np.array([1, 2, 3], dtype=np.int64), (np.array([4., 5., 6.]), np.array([7., 8., 9.])), np.array([10, 11, 12], dtype=np.int64)) dataset = dataset_ops.Dataset.from_tensor_slices(components_for_slices) self.assertEqual( (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64), dataset_ops.get_legacy_output_types(dataset)) self.assertEqual(([], ([], []), []), dataset_ops.get_legacy_output_shapes(dataset)) @combinations.generate(test_base.default_test_combinations()) def testNoneComponent(self): dataset = dataset_ops.Dataset.from_tensors((42, None)) if context.executing_eagerly(): self.assertDatasetProduces(dataset, expected_output=[(42, None)]) else: iterator = dataset_ops.make_one_shot_iterator(dataset) next_first, next_second = iterator.get_next() self.assertIsNone(next_second) with self.cached_session() as sess: self.assertEqual(sess.run(next_first), 42) @combinations.generate(test_base.default_test_combinations()) def testNoneComponentInFunction(self): @def_function.function def fn(ds): total = 0 it = iter(ds) for elem in it: x, _ = elem total += x return total dataset = dataset_ops.Dataset.range( 10, output_type=dtypes.int32).map(lambda x: (x, None)) self.assertEqual(self.evaluate(fn(dataset)), 45) @combinations.generate(test_base.default_test_combinations()) def testIncorrectPythonStructure(self): # Tests that an exception is raised (as opposed to a segfault) when the # Python structure assigned to a dataset is incorrect. 
dataset = dataset_ops.Dataset.range(10) spec = tensor_spec.TensorSpec([], dtypes.int64) new_structure = (spec, spec) dataset = dataset_ops._RestructuredDataset(dataset, new_structure) dataset = dataset.map(lambda x, y: y) with self.assertRaisesOpError(""): self.getDatasetOutput(dataset) @combinations.generate(test_base.default_test_combinations()) def testNamedTupleStructure(self): Foo = collections.namedtuple("Foo", ["a", "b"]) x = Foo(a=3, b="test") dataset = dataset_ops.Dataset.from_tensors(x) dataset = dataset_ops.Dataset.from_tensor_slices([dataset, dataset]) self.assertEqual( str(dataset.element_spec), "DatasetSpec(Foo(a=TensorSpec(shape=(), dtype=tf.int32, name=None), " "b=TensorSpec(shape=(), dtype=tf.string, name=None)), TensorShape([]))") @combinations.generate(test_base.eager_only_combinations()) def testIterationError(self): @def_function.function(autograph=False) def fn(ds): for _ in ds: pass dataset = dataset_ops.Dataset.range(10) with self.assertRaises(ValueError): self.evaluate(fn(dataset))
DatasetTest
python
doocs__leetcode
solution/0800-0899/0887.Super Egg Drop/Solution2.py
{ "start": 0, "end": 619 }
class ____: def superEggDrop(self, k: int, n: int) -> int: f = [[0] * (k + 1) for _ in range(n + 1)] for i in range(1, n + 1): f[i][1] = i for i in range(1, n + 1): for j in range(2, k + 1): l, r = 1, i while l < r: mid = (l + r + 1) >> 1 a, b = f[mid - 1][j - 1], f[i - mid][j] if a <= b: l = mid else: r = mid - 1 f[i][j] = max(f[l - 1][j - 1], f[i - l][j]) + 1 return f[n][k]
Solution
python
pytorch__pytorch
test/torch_np/numpy_tests/lib/test_function_base.py
{ "start": 20502, "end": 20811 }
class ____(TestCase): def test_basic(self): a = [3, 4, 5, 10, -3, -5, 6.0] assert_equal(np.amax(a), 10.0) b = [[3, 6.0, 9.0], [4, 10.0, 5.0], [8, 3.0, 2.0]] assert_equal(np.amax(b, axis=0), [8.0, 10.0, 9.0]) assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0])
TestAmax
python
huggingface__transformers
src/transformers/models/glm/modeling_glm.py
{ "start": 19067, "end": 22078 }
class ____(GlmPreTrainedModel, GenerationMixin):
    """GLM causal language model: a `GlmModel` backbone topped with a linear
    LM head that projects hidden states to vocabulary logits."""

    # The LM head weight is tied to (shared with) the input embedding matrix.
    _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = GlmModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        Example:

        ```python
        >>> from transformers import AutoTokenizer, GlmForCausalLM

        >>> model = GlmForCausalLM.from_pretrained("meta-glm/Glm-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-glm/Glm-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        # An int `logits_to_keep` selects the last N sequence positions; a
        # tensor is used directly as an index into the sequence dimension.
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        # Loss is only computed when labels are provided (training/eval).
        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
GlmForCausalLM
python
pandas-dev__pandas
pandas/core/computation/engines.py
{ "start": 944, "end": 2708 }
class ____(metaclass=abc.ABCMeta):
    """Base class shared by all expression-evaluation engines."""

    has_neg_frac = False

    def __init__(self, expr) -> None:
        self.expr = expr
        self.aligned_axes = None
        self.result_type = None
        self.result_name = None

    def convert(self) -> str:
        """
        Convert an expression for evaluation.

        Defaults to return the expression as a string.
        """
        return printing.pprint_thing(self.expr)

    def evaluate(self) -> object:
        """
        Run the engine on the expression.

        This method performs alignment which is necessary no matter what
        engine is being used, thus its implementation is in the base class.

        Returns
        -------
        object
            The result of the passed expression.
        """
        if not self._is_aligned:
            # Align the expression's terms once and cache the results.
            aligned = align_terms(self.expr.terms)
            self.result_type, self.aligned_axes, self.result_name = aligned

        # make sure no names in resolvers and locals/globals clash
        raw_result = self._evaluate()
        return reconstruct_object(
            self.result_type,
            raw_result,
            self.aligned_axes,
            self.expr.terms.return_type,
            self.result_name,
        )

    @property
    def _is_aligned(self) -> bool:
        # Both pieces of alignment state must be populated.
        return self.aligned_axes is not None and self.result_type is not None

    @abc.abstractmethod
    def _evaluate(self):
        """
        Return an evaluated expression.

        Parameters
        ----------
        env : Scope
            The local and global environment in which to evaluate an
            expression.

        Notes
        -----
        Must be implemented by subclasses.
        """
AbstractEngine
python
streamlit__streamlit
lib/tests/streamlit/elements/text_test.py
{ "start": 3545, "end": 5211 }
class ____(DeltaGeneratorTestCase):
    """Test st.text text_alignment parameter."""

    @parameterized.expand(
        [
            # (text_alignment argument, expected TextAlignment proto enum:
            #  1=LEFT, 2=CENTER, 3=RIGHT, 4=JUSTIFY)
            ("left", 1),
            ("center", 2),
            ("right", 3),
            ("justify", 4),
            (None, 1),  # Default case
        ],
        # Suffix each generated test with the alignment under test,
        # e.g. "..._left" / "..._default".
        name_func=lambda func, num, param: f"{func.__name__}_{param.args[0] or 'default'}",
    )
    def test_st_text_text_alignment(
        self, text_alignment: str | None, expected_alignment: int
    ):
        """Test st.text with various text_alignment values.

        Parameters
        ----------
        text_alignment : str | None
            The text alignment value to test, or None for default behavior.
        expected_alignment : int
            The expected protobuf alignment enum value
            (1=LEFT, 2=CENTER, 3=RIGHT, 4=JUSTIFY).
        """
        # None exercises the default: call st.text without the keyword at all.
        if text_alignment is None:
            st.text("Test text")
        else:
            st.text("Test text", text_alignment=text_alignment)

        el = self.get_delta_from_queue().new_element
        assert el.text.body == "Test text"
        assert el.text_alignment_config.alignment == expected_alignment

    def test_st_text_text_alignment_invalid(self):
        """Test st.text with invalid text_alignment raises error."""
        with pytest.raises(StreamlitAPIException) as exc:
            st.text("Test text", text_alignment="middle")

        # The error should name the bad value and list every valid option.
        assert 'Invalid text_alignment value: "middle"' in str(exc.value)
        assert "left" in str(exc.value)
        assert "center" in str(exc.value)
        assert "right" in str(exc.value)
        assert "justify" in str(exc.value)
StTextTextAlignmentTest
python
kamyu104__LeetCode-Solutions
Python/tweet-counts-per-frequency.py
{ "start": 3402, "end": 4438 }
class ____(object):
    """Record tweet timestamps per name and count them in fixed-width
    minute/hour/day buckets over a query range."""

    def __init__(self):
        # One skip list of timestamps per tweet name; duplicates are allowed
        # because the same tweet name can be recorded at the same second.
        self.__records = collections.defaultdict(
            lambda: SkipList(can_duplicated=True)
        )
        # Bucket width in seconds for each supported frequency keyword.
        self.__lookup = {"minute": 60, "hour": 3600, "day": 86400}

    def recordTweet(self, tweetName, time):
        """
        :type tweetName: str
        :type time: int
        :rtype: None
        """
        self.__records[tweetName].add(time)

    def getTweetCountsPerFrequency(self, freq, tweetName, startTime, endTime):
        """
        :type freq: str
        :type tweetName: str
        :type startTime: int
        :type endTime: int
        :rtype: List[int]
        """
        bucket_width = self.__lookup[freq]
        bucket_count = (endTime - startTime) // bucket_width + 1
        counts = [0] * bucket_count
        # Walk the bottom level of the skip list starting at the first
        # timestamp >= startTime and stop once we pass endTime.
        node = self.__records[tweetName].lower_bound(startTime)
        while node is not None and node.val <= endTime:
            counts[(node.val - startTime) // bucket_width] += 1
            node = node.nexts[0]
        return counts
TweetCounts
python
dagster-io__dagster
python_modules/dagster/dagster/_daemon/daemon.py
{ "start": 1899, "end": 2145 }
class ____(Enum):
    """Marker values a daemon iterator may yield to delimit a span
    (start/end). Exact span semantics are defined by the iterator's
    consumer — NOTE(review): presumed to drive tracing/instrumentation."""

    START_SPAN = "START_SPAN"
    END_SPAN = "END_SPAN"


# A daemon iterator yields None, a serialized error, or a span marker; the
# two trailing `None` Generator parameters mean it neither receives sent
# values nor returns one.
DaemonIterator: TypeAlias = Generator[Union[None, SerializableErrorInfo, SpanMarker], None, None]

# Type variable bounded to workspace process contexts, used to parameterize
# daemon implementations over their context type.
TContext = TypeVar("TContext", bound=IWorkspaceProcessContext)
SpanMarker
python
keras-team__keras
keras/src/layers/pooling/global_max_pooling3d.py
{ "start": 261, "end": 2585 }
class ____(BaseGlobalPooling):
    """Global max pooling operation for 3D data.

    Reduces each feature map of a 5D input to its maximum value over the
    three spatial dimensions.

    Args:
        data_format: string, either `"channels_last"` or `"channels_first"`.
            `"channels_last"` corresponds to inputs with shape
            `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`;
            `"channels_first"` corresponds to inputs with shape
            `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
            It defaults to the `image_data_format` value found in your Keras
            config file at `~/.keras/keras.json`. If you never set it, then
            it will be `"channels_last"`.
        keepdims: A boolean, whether to keep the spatial dimensions or not.
            If `keepdims` is `False` (default), the rank of the tensor is
            reduced for spatial dimensions. If `keepdims` is `True`, the
            spatial dimensions are retained with length 1. The behavior is
            the same as for `tf.reduce_mean` or `np.mean`.

    Input shape:
        - If `data_format='channels_last'`:
            5D tensor with shape:
            `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
        - If `data_format='channels_first'`:
            5D tensor with shape:
            `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`

    Output shape:
        - If `keepdims=False`:
            2D tensor with shape `(batch_size, channels)`.
        - If `keepdims=True`:
            - If `data_format="channels_last"`:
                5D tensor with shape `(batch_size, 1, 1, 1, channels)`
            - If `data_format="channels_first"`:
                5D tensor with shape `(batch_size, channels, 1, 1, 1)`

    Example:

    >>> x = np.random.rand(2, 4, 5, 4, 3)
    >>> y = keras.layers.GlobalMaxPooling3D()(x)
    >>> y.shape
    (2, 3)
    """

    def __init__(self, data_format=None, keepdims=False, **kwargs):
        super().__init__(
            pool_dimensions=3,
            data_format=data_format,
            keepdims=keepdims,
            **kwargs,
        )

    def call(self, inputs):
        # Spatial axes sit at 1..3 when channels are last, 2..4 when first.
        if self.data_format == "channels_last":
            spatial_axes = [1, 2, 3]
        else:
            spatial_axes = [2, 3, 4]
        return ops.max(inputs, axis=spatial_axes, keepdims=self.keepdims)
GlobalMaxPooling3D
python
kamyu104__LeetCode-Solutions
Python/design-bitset.py
{ "start": 216, "end": 1574 }
class ____(object): def __init__(self, size): """ :type size: int """ self.__lookup = [False]*size self.__flip = False self.__cnt = 0 def fix(self, idx): """ :type idx: int :rtype: None """ if self.__lookup[idx] == self.__flip: self.__lookup[idx] = not self.__lookup[idx] self.__cnt += 1 def unfix(self, idx): """ :type idx: int :rtype: None """ if self.__lookup[idx] != self.__flip: self.__lookup[idx] = not self.__lookup[idx] self.__cnt -= 1 def flip(self): """ :rtype: None """ self.__flip = not self.__flip self.__cnt = len(self.__lookup)-self.__cnt def all(self): """ :rtype: bool """ return self.__cnt == len(self.__lookup) def one(self): """ :rtype: bool """ return self.__cnt >= 1 def count(self): """ :rtype: int """ return self.__cnt def toString(self): """ :rtype: str """ result = ['']*len(self.__lookup) for i, x in enumerate(self.__lookup): result[i] = '1' if x != self.__flip else '0' return "".join(result)
Bitset
python
facelessuser__pymdown-extensions
tests/test_extensions/test_magiclink.py
{ "start": 4940, "end": 12751 }
class ____(util.MdCase): """Test cases for custom provider.""" extension = [ 'pymdownx.magiclink', 'pymdownx.saneheaders' ] extension_configs = { 'pymdownx.magiclink': { 'repo_url_shorthand': True, 'repo_url_shortener': True, 'user': 'facelessuser', 'repo': 'pymdown-extensions', 'provider': 'test', 'custom': { 'test': { 'host': 'http://test.com', 'label': 'Test', 'type': 'github' } } } } def test_user(self): """Test user in custom repo.""" self.check_markdown( '@facelessuser', '<p><a class="magiclink magiclink-test magiclink-mention" href="http://test.com/facelessuser" title="Test User: facelessuser">@facelessuser</a></p>' # noqa: E501 ) def test_repo(self): """Test repo in custom repo.""" self.check_markdown( '@facelessuser/pymdown-extensions', '<p><a class="magiclink magiclink-test magiclink-repository" href="http://test.com/facelessuser/pymdown-extensions" title="Test Repository: facelessuser/pymdown-extensions">facelessuser/pymdown-extensions</a></p>' # noqa: E501 ) def test_default_issue(self): """Test default issue case.""" self.check_markdown( '#2', '<p><a class="magiclink magiclink-test magiclink-issue" href="http://test.com/facelessuser/pymdown-extensions/issues/2" title="Test Issue: facelessuser/pymdown-extensions #2">#2</a></p>' # noqa: E501 ) def test_default_pull(self): """Test default pull case.""" self.check_markdown( '!2', '<p><a class="magiclink magiclink-test magiclink-pull" href="http://test.com/facelessuser/pymdown-extensions/pull/2" title="Test Pull Request: facelessuser/pymdown-extensions #2">!2</a></p>' # noqa: E501 ) def test_default_discussion(self): """Test default discussion case.""" self.check_markdown( '?2', '<p><a class="magiclink magiclink-test magiclink-discussion" href="http://test.com/facelessuser/pymdown-extensions/discussions/2" title="Test Discussion: facelessuser/pymdown-extensions #2">?2</a></p>' # noqa: E501 ) def test_default_commit(self): """Test default commit case.""" self.check_markdown( 
'3f6b07a8eeaa9d606115758d90f55fec565d4e2a', '<p><a class="magiclink magiclink-test magiclink-commit" href="http://test.com/facelessuser/pymdown-extensions/commit/3f6b07a8eeaa9d606115758d90f55fec565d4e2a" title="Test Commit: facelessuser/pymdown-extensions@3f6b07a">3f6b07a</a></p>' # noqa: E501 ) def test_default_compare(self): """Test default compare case.""" self.check_markdown( 'e2ed7e0b3973f3f9eb7a26b8ef7ae514eebfe0d2...90b6fb8711e75732f987982cc024e9bb0111beac', '<p><a class="magiclink magiclink-test magiclink-compare" href="http://test.com/facelessuser/pymdown-extensions/compare/e2ed7e0b3973f3f9eb7a26b8ef7ae514eebfe0d2...90b6fb8711e75732f987982cc024e9bb0111beac" title="Test Compare: facelessuser/pymdown-extensions@e2ed7e0...90b6fb8">e2ed7e0...90b6fb8</a></p>' # noqa: E501 ) def test_user_link(self): """Test user link.""" self.check_markdown( 'http://test.com/facelessuser', '<p><a class="magiclink magiclink-test magiclink-mention" href="http://test.com/facelessuser" title="Test User: facelessuser">@facelessuser</a></p>' # noqa: E501 ) def test_repo_link(self): """Test repository link.""" self.check_markdown( 'http://test.com/facelessuser/pymdown-extensions', '<p><a class="magiclink magiclink-test magiclink-repository" href="http://test.com/facelessuser/pymdown-extensions" title="Test Repository: facelessuser/pymdown-extensions">facelessuser/pymdown-extensions</a></p>' # noqa: E501 ) def test_issue_link(self): """Test issue link.""" self.check_markdown( 'http://test.com/facelessuser/pymdown-extensions/issues/2', '<p><a class="magiclink magiclink-test magiclink-issue" href="http://test.com/facelessuser/pymdown-extensions/issues/2" title="Test Issue: facelessuser/pymdown-extensions #2">#2</a></p>' # noqa: E501 ) def test_pull_link(self): """Test issue link.""" self.check_markdown( 'http://test.com/facelessuser/pymdown-extensions/pull/2', '<p><a class="magiclink magiclink-test magiclink-pull" href="http://test.com/facelessuser/pymdown-extensions/pull/2" title="Test 
Pull Request: facelessuser/pymdown-extensions #2">!2</a></p>' # noqa: E501 ) def test_discussion_link(self): """Test discussion link.""" self.check_markdown( 'http://test.com/facelessuser/pymdown-extensions/discussions/2', '<p><a class="magiclink magiclink-test magiclink-discussion" href="http://test.com/facelessuser/pymdown-extensions/discussions/2" title="Test Discussion: facelessuser/pymdown-extensions #2">?2</a></p>' # noqa: E501 ) def test_commit_link(self): """Test commit link.""" self.check_markdown( 'http://test.com/facelessuser/pymdown-extensions/commit/3f6b07a8eeaa9d606115758d90f55fec565d4e2a', '<p><a class="magiclink magiclink-test magiclink-commit" href="http://test.com/facelessuser/pymdown-extensions/commit/3f6b07a8eeaa9d606115758d90f55fec565d4e2a" title="Test Commit: facelessuser/pymdown-extensions@3f6b07a">3f6b07a</a></p>' # noqa: E501 ) def test_compare_link(self): """Test compare link.""" self.check_markdown( 'http://test.com/facelessuser/pymdown-extensions/compare/e2ed7e0b3973f3f9eb7a26b8ef7ae514eebfe0d2...90b6fb8711e75732f987982cc024e9bb0111beac', '<p><a class="magiclink magiclink-test magiclink-compare" href="http://test.com/facelessuser/pymdown-extensions/compare/e2ed7e0b3973f3f9eb7a26b8ef7ae514eebfe0d2...90b6fb8711e75732f987982cc024e9bb0111beac" title="Test Compare: facelessuser/pymdown-extensions@e2ed7e0...90b6fb8">e2ed7e0...90b6fb8</a></p>' # noqa: E501 ) def test_external_user(self): """Test external user in custom repo.""" self.check_markdown( '@github:facelessuser', '<p><a class="magiclink magiclink-github magiclink-mention" href="https://github.com/facelessuser" title="GitHub User: facelessuser">@facelessuser</a></p>' # noqa: E501 ) self.check_markdown( '@test:facelessuser', '<p><a class="magiclink magiclink-test magiclink-mention" href="http://test.com/facelessuser" title="Test User: facelessuser">@facelessuser</a></p>' # noqa: E501 ) def test_bad_name(self): """Test bad name.""" extension = [ 'pymdownx.magiclink', 
'pymdownx.saneheaders' ] extension_configs = { 'pymdownx.magiclink': { 'repo_url_shorthand': True, 'repo_url_shortener': True, 'user': 'facelessuser', 'repo': 'pymdown-extensions', 'provider': 'bad-name', 'custom': { 'bad-name': { 'host': 'http://bad.com', 'label': 'Bad', 'type': 'github' } } } } with self.assertRaises(ValueError): markdown.markdown('', extensions=extension, extension_configs=extension_configs)
TestMagicLinkCustom
python
plotly__plotly.py
plotly/graph_objs/scatter/_line.py
{ "start": 233, "end": 9559 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "scatter" _path_str = "scatter.line" _valid_props = { "backoff", "backoffsrc", "color", "dash", "shape", "simplify", "smoothing", "width", } @property def backoff(self): """ Sets the line back off from the end point of the nth line segment (in px). This option is useful e.g. to avoid overlap with arrowhead markers. With "auto" the lines would trim before markers if `marker.angleref` is set to "previous". The 'backoff' property is a number and may be specified as: - An int or float in the interval [0, inf] - A tuple, list, or one-dimensional numpy array of the above Returns ------- int|float|numpy.ndarray """ return self["backoff"] @backoff.setter def backoff(self, val): self["backoff"] = val @property def backoffsrc(self): """ Sets the source reference on Chart Studio Cloud for `backoff`. The 'backoffsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["backoffsrc"] @backoffsrc.setter def backoffsrc(self, val): self["backoffsrc"] = val @property def color(self): """ Sets the line color. The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def dash(self): """ Sets the dash style of lines. Set to a dash type string ("solid", "dot", "dash", "longdash", "dashdot", or "longdashdot") or a dash length list in px (eg "5px,10px,2px,2px"). The 'dash' property is an enumeration that may be specified as: - One of the following dash styles: ['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot'] - A string containing a dash length list in pixels or percentages (e.g. 
'5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.) Returns ------- str """ return self["dash"] @dash.setter def dash(self, val): self["dash"] = val @property def shape(self): """ Determines the line shape. With "spline" the lines are drawn using spline interpolation. The other available values correspond to step-wise line shapes. The 'shape' property is an enumeration that may be specified as: - One of the following enumeration values: ['linear', 'spline', 'hv', 'vh', 'hvh', 'vhv'] Returns ------- Any """ return self["shape"] @shape.setter def shape(self, val): self["shape"] = val @property def simplify(self): """ Simplifies lines by removing nearly-collinear points. When transitioning lines, it may be desirable to disable this so that the number of points along the resulting SVG path is unaffected. The 'simplify' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["simplify"] @simplify.setter def simplify(self, val): self["simplify"] = val @property def smoothing(self): """ Has an effect only if `shape` is set to "spline" Sets the amount of smoothing. 0 corresponds to no smoothing (equivalent to a "linear" shape). The 'smoothing' property is a number and may be specified as: - An int or float in the interval [0, 1.3] Returns ------- int|float """ return self["smoothing"] @smoothing.setter def smoothing(self, val): self["smoothing"] = val @property def width(self): """ Sets the line width (in px). The 'width' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["width"] @width.setter def width(self, val): self["width"] = val @property def _prop_descriptions(self): return """\ backoff Sets the line back off from the end point of the nth line segment (in px). This option is useful e.g. to avoid overlap with arrowhead markers. With "auto" the lines would trim before markers if `marker.angleref` is set to "previous". 
backoffsrc Sets the source reference on Chart Studio Cloud for `backoff`. color Sets the line color. dash Sets the dash style of lines. Set to a dash type string ("solid", "dot", "dash", "longdash", "dashdot", or "longdashdot") or a dash length list in px (eg "5px,10px,2px,2px"). shape Determines the line shape. With "spline" the lines are drawn using spline interpolation. The other available values correspond to step-wise line shapes. simplify Simplifies lines by removing nearly-collinear points. When transitioning lines, it may be desirable to disable this so that the number of points along the resulting SVG path is unaffected. smoothing Has an effect only if `shape` is set to "spline" Sets the amount of smoothing. 0 corresponds to no smoothing (equivalent to a "linear" shape). width Sets the line width (in px). """ def __init__( self, arg=None, backoff=None, backoffsrc=None, color=None, dash=None, shape=None, simplify=None, smoothing=None, width=None, **kwargs, ): """ Construct a new Line object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.scatter.Line` backoff Sets the line back off from the end point of the nth line segment (in px). This option is useful e.g. to avoid overlap with arrowhead markers. With "auto" the lines would trim before markers if `marker.angleref` is set to "previous". backoffsrc Sets the source reference on Chart Studio Cloud for `backoff`. color Sets the line color. dash Sets the dash style of lines. Set to a dash type string ("solid", "dot", "dash", "longdash", "dashdot", or "longdashdot") or a dash length list in px (eg "5px,10px,2px,2px"). shape Determines the line shape. With "spline" the lines are drawn using spline interpolation. The other available values correspond to step-wise line shapes. simplify Simplifies lines by removing nearly-collinear points. 
When transitioning lines, it may be desirable to disable this so that the number of points along the resulting SVG path is unaffected. smoothing Has an effect only if `shape` is set to "spline" Sets the amount of smoothing. 0 corresponds to no smoothing (equivalent to a "linear" shape). width Sets the line width (in px). Returns ------- Line """ super().__init__("line") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.scatter.Line constructor must be a dict or an instance of :class:`plotly.graph_objs.scatter.Line`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("backoff", arg, backoff) self._set_property("backoffsrc", arg, backoffsrc) self._set_property("color", arg, color) self._set_property("dash", arg, dash) self._set_property("shape", arg, shape) self._set_property("simplify", arg, simplify) self._set_property("smoothing", arg, smoothing) self._set_property("width", arg, width) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Line
python
apache__airflow
airflow-ctl/src/airflowctl/api/datamodels/generated.py
{ "start": 28085, "end": 28357 }
class ____(BaseModel): """ Triggerer info serializer for responses. """ status: Annotated[str | None, Field(title="Status")] = None latest_triggerer_heartbeat: Annotated[str | None, Field(title="Latest Triggerer Heartbeat")] = None
TriggererInfoResponse
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/enum1.py
{ "start": 7105, "end": 7171 }
class ____(TestEnum17): A = (1, "A") B = (2, "B")
TestEnum18
python
huggingface__transformers
tests/models/segformer/test_image_processing_segformer.py
{ "start": 3236, "end": 13072 }
class ____(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = SegformerImageProcessor if is_vision_available() else None fast_image_processing_class = SegformerImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = SegformerImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_reduce_labels")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 30, "width": 30}) self.assertEqual(image_processor.do_reduce_labels, False) image_processor = image_processing_class.from_dict( self.image_processor_dict, size=42, do_reduce_labels=True ) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) self.assertEqual(image_processor.do_reduce_labels, True) def test_call_segmentation_maps(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) maps = [] for image in image_inputs: self.assertIsInstance(image, torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input 
encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 1, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) # Test batched encoding = image_processing(image_inputs, maps, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) # Test not batched input (PIL images) image, segmentation_map = prepare_semantic_single_inputs() encoding = image_processing(image, segmentation_map, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 1, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) # Test batched input (PIL images) images, segmentation_maps = prepare_semantic_batch_inputs() encoding = 
image_processing(images, segmentation_maps, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 2, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) def test_reduce_labels(self): # Initialize image_processing for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 image, map = prepare_semantic_single_inputs() encoding = image_processing(image, map, return_tensors="pt") self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 150) image_processing.do_reduce_labels = True encoding = image_processing(image, map, return_tensors="pt") self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) def test_slow_fast_equivalence(self): if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") dummy_image, dummy_map = prepare_semantic_single_inputs() image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) image_encoding_slow = image_processor_slow(dummy_image, segmentation_maps=dummy_map, return_tensors="pt") image_encoding_fast = 
image_processor_fast(dummy_image, segmentation_maps=dummy_map, return_tensors="pt") self._assert_slow_fast_tensors_equivalence(image_encoding_slow.pixel_values, image_encoding_fast.pixel_values) self._assert_slow_fast_tensors_equivalence( image_encoding_slow.labels.float(), image_encoding_fast.labels.float(), atol=5, mean_atol=0.01 ) def test_slow_fast_equivalence_batched(self): if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop: self.skipTest( reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors" ) dummy_images, dummy_maps = prepare_semantic_batch_inputs() image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt") encoding_fast = image_processor_fast(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt") self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values) self._assert_slow_fast_tensors_equivalence( encoding_slow.labels.float(), encoding_fast.labels.float(), atol=5, mean_atol=0.01 )
SegformerImageProcessingTest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1083440, "end": 1084040 }
class ____(sgqlc.types.Type, Node): """A user, team, or app who has the ability to bypass a pull request requirement on a protected branch. """ __schema__ = github_schema __field_names__ = ("actor", "branch_protection_rule") actor = sgqlc.types.Field("BranchActorAllowanceActor", graphql_name="actor") """The actor that can bypass.""" branch_protection_rule = sgqlc.types.Field(BranchProtectionRule, graphql_name="branchProtectionRule") """Identifies the branch protection rule associated with the allowed user, team, or app. """
BypassPullRequestAllowance
python
huggingface__transformers
tests/models/deit/test_modeling_deit.py
{ "start": 15344, "end": 18034 }
class ____(unittest.TestCase): @cached_property def default_image_processor(self): return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to( torch_device ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_interpolate_pos_encoding(self): model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to( torch_device ) image_processor = self.default_image_processor # image size is {"height": 480, "width": 640} image = prepare_img() image_processor.size = {"height": 480, "width": 640} # center crop set to False so image is not center cropped to 224x224 inputs = image_processor(images=image, return_tensors="pt", do_center_crop=False).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs, interpolate_pos_encoding=True) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) @slow @require_accelerate @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): r""" A small test to make sure that inference work in half precision without any problem. 
""" model = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224", dtype=torch.float16, device_map="auto" ) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) # forward pass to make sure inference works in fp16 with torch.no_grad(): _ = model(pixel_values)
DeiTModelIntegrationTest
python
huggingface__transformers
tests/models/fuyu/test_modeling_fuyu.py
{ "start": 10146, "end": 14568 }
class ____(unittest.TestCase): @cached_property def default_processor(self): return FuyuProcessor.from_pretrained("adept/fuyu-8b") @cached_property def default_model(self): return FuyuForCausalLM.from_pretrained("adept/fuyu-8b", dtype="float16", device_map=torch_device) def test_greedy_generation(self): processor = self.default_processor model = self.default_model url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png" image = Image.open(io.BytesIO(requests.get(url).content)) text_prompt_coco_captioning = "Generate a coco-style caption.\n" inputs = processor(images=image, text=text_prompt_coco_captioning, return_tensors="pt").to( torch_device, torch.float16 ) generated_ids = model.generate(**inputs, max_new_tokens=10) # take the last 8 tokens (in order to skip special \n\x04 characters) and decode them generated_text = processor.batch_decode(generated_ids[:, -8:], skip_special_tokens=True)[0] self.assertEqual(generated_text, "A blue bus parked on the side of a road.") """ @slow @require_torch_accelerator def test_model_8b_chat_greedy_generation_bus_color(self): EXPECTED_TEXT_COMPLETION = "The bus is blue.\n|ENDOFTEXT|" text_prompt_bus_color = "What color is the bus?\n" model_inputs_bus_color = self.processor(text=text_prompt_bus_color, images=self.bus_image_pil) generated_tokens = self.model.generate(**model_inputs_bus_color, max_new_tokens=10) text = self.processor.tokenizer.batch_decode(generated_tokens) end_sequence = text[0].split("\x04")[1] clean_sequence = ( end_sequence[: end_sequence.find("|ENDOFTEXT|") + len("|ENDOFTEXT|")] if "|ENDOFTEXT|" in end_sequence else end_sequence ) self.assertEqual(EXPECTED_TEXT_COMPLETION, clean_sequence) @slow @require_torch_accelerator def test_model_8b_chat_greedy_generation_chart_vqa(self): EXPECTED_TEXT_TOKENS = ["The","life expectancy","at","birth","of male","s in","","20","18","is","","80",".","7",".","\n","|ENDOFTEXT|",] # fmt: skip expected_text_completion = " 
".join(EXPECTED_TEXT_TOKENS) # TODO make sure the end string matches text_prompt_chart_vqa = "What is the highest life expectancy at birth of male?\n" chart_image_url = ( "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/chart.png" ) chart_image_pil = Image.open(io.BytesIO(requests.get(chart_image_url).content)) model_inputs_chart_vqa = self.processor(text=text_prompt_chart_vqa, images=chart_image_pil) generated_tokens = self.model.generate(**model_inputs_chart_vqa, max_new_tokens=10) text = self.processor.tokenizer.batch_decode(generated_tokens) end_sequence = text[0].split("\x04")[1] clean_sequence = ( end_sequence[: end_sequence.find("|ENDOFTEXT|") + len("|ENDOFTEXT|")] if "|ENDOFTEXT|" in end_sequence else end_sequence ) self.assertEqual(expected_text_completion, clean_sequence) @slow @require_torch_accelerator def test_model_8b_chat_greedy_generation_bounding_box(self): EXPECTED_TEXT_COMPLETION = "\x00194213202244\x01|ENDOFTEXT|" text_prompt_bbox = "When presented with a box, perform OCR to extract text contained within it. If provided with text, generate the corresponding bounding box.\\nWilliams" # noqa: E231 bbox_image_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bbox_sample_image.png" bbox_image_pil = Image.open(io.BytesIO(requests.get(bbox_image_url).content)) model_inputs_bbox = self.processor(text=text_prompt_bbox, images=bbox_image_pil) generated_tokens = self.model.generate(**model_inputs_bbox, max_new_tokens=10) text = self.processor.tokenizer.batch_decode(generated_tokens) end_sequence = text[0].split("\x04")[1] clean_sequence = ( end_sequence[: end_sequence.find("|ENDOFTEXT|") + len("|ENDOFTEXT|")] if "|ENDOFTEXT|" in end_sequence else end_sequence ) self.assertEqual(EXPECTED_TEXT_COMPLETION, clean_sequence) """
FuyuModelIntegrationTest
python
ray-project__ray
rllib/algorithms/dreamerv3/torch/models/components/continue_predictor.py
{ "start": 314, "end": 2383 }
class ____(nn.Module): """The world-model network sub-component used to predict the `continue` flags . Predicted continue flags are used to produce "dream data" to learn the policy in. The continue flags are predicted via a linear output used to parameterize a Bernoulli distribution, from which simply the mode is used (no stochastic sampling!). In other words, if the sigmoid of the output of the linear layer is >0.5, we predict a continuation of the episode, otherwise we predict an episode terminal. """ def __init__(self, *, input_size: int, model_size: str = "XS"): """Initializes a ContinuePredictor instance. Args: input_size: The input size of the continue predictor. model_size: The "Model Size" used according to [1] Appendinx B. Determines the exact size of the underlying MLP. """ super().__init__() self.mlp = MLP( input_size=input_size, model_size=model_size, output_layer_size=1, ) def forward(self, h, z, return_distribution=False): """Performs a forward pass through the continue predictor. Args: h: The deterministic hidden state of the sequence model. [B, dim(h)]. z: The stochastic discrete representations of the original observation input. [B, num_categoricals, num_classes]. return_distribution: Whether to return (as a second tuple item) the Bernoulli distribution object created by the underlying MLP. """ z_shape = z.size() z = z.view(z_shape[0], -1) out = torch.cat([h, z], dim=-1) out = self.mlp(out) logits = out.squeeze(dim=-1) bernoulli = torch.distributions.Bernoulli(logits=logits) # Use the mode of the Bernoulli distribution (greedy, deterministic "sample"). continue_ = bernoulli.probs > 0.5 if return_distribution: return continue_, bernoulli return continue_
ContinuePredictor
python
skorch-dev__skorch
skorch/tests/callbacks/test_lr_scheduler.py
{ "start": 15067, "end": 19226 }
class ____(): def assert_lr_correct( self, optimizer, targets, epochs, min_lr, max_lr, base_period, period_mult): """Test that learning rate was set correctly.""" targets = [targets] if len(optimizer.param_groups) == 1 else targets scheduler = WarmRestartLR( optimizer, min_lr, max_lr, base_period, period_mult ) for epoch in range(epochs): optimizer.step() # suppress warning about .step call order scheduler.step(epoch) for param_group, target in zip(optimizer.param_groups, targets): assert param_group['lr'] == pytest.approx(target[epoch]) def _single_period_targets(self, epochs, min_lr, max_lr, period): targets = 1 + np.cos(np.arange(epochs) * np.pi / period) targets = min_lr + 0.5 * (max_lr - min_lr) * targets return targets.tolist() # pylint: disable=missing-docstring def _multi_period_targets( self, epochs, min_lr, max_lr, base_period, period_mult): remaining_epochs = epochs current_period = base_period targets = list() while remaining_epochs > 0: period_epochs = min(remaining_epochs, current_period + 1) remaining_epochs -= period_epochs targets += self._single_period_targets( period_epochs, min_lr, max_lr, current_period ) current_period = current_period * period_mult return targets @pytest.fixture() def init_optimizer(self, classifier_module): return SGD(classifier_module().parameters(), lr=0.05) def test_raise_incompatible_len_on_min_lr_err(self, init_optimizer): with pytest.raises(ValueError) as excinfo: WarmRestartLR(init_optimizer, min_lr=[1e-1, 1e-2]) assert 'min_lr' in str(excinfo.value) def test_raise_incompatible_len_on_max_lr_err(self, init_optimizer): with pytest.raises(ValueError) as excinfo: WarmRestartLR(init_optimizer, max_lr=[1e-1, 1e-2]) assert 'max_lr' in str(excinfo.value) def test_single_period(self, init_optimizer): optimizer = init_optimizer epochs = 3 min_lr = 5e-5 max_lr = 5e-2 base_period = 3 period_mult = 1 targets = self._single_period_targets( epochs, min_lr, max_lr, base_period) self.assert_lr_correct( optimizer, targets, epochs, 
min_lr, max_lr, base_period, period_mult ) def test_multi_period_with_restart(self, init_optimizer): optimizer = init_optimizer epochs = 9 min_lr = 5e-5 max_lr = 5e-2 base_period = 2 period_mult = 2 targets = self._multi_period_targets( epochs, min_lr, max_lr, base_period, period_mult ) self.assert_lr_correct( optimizer, targets, epochs, min_lr, max_lr, base_period, period_mult ) def test_restarts_with_multiple_groups(self, classifier_module): classifier = classifier_module() optimizer = SGD( [ {'params': classifier.sequential[0].parameters(), 'lr': 1e-3}, {'params': classifier.sequential[1].parameters(), 'lr': 1e-2}, {'params': classifier.sequential[2].parameters(), 'lr': 1e-1}, ] ) epochs = 9 min_lr_group = [1e-5, 1e-4, 1e-3] max_lr_group = [1e-3, 1e-2, 1e-1] base_period = 2 period_mult = 2 targets = list() for min_lr, max_lr in zip(min_lr_group, max_lr_group): targets.append( self._multi_period_targets( epochs, min_lr, max_lr, base_period, period_mult ) ) self.assert_lr_correct( optimizer, targets, epochs, min_lr_group, max_lr_group, base_period, period_mult )
TestWarmRestartLR
python
doocs__leetcode
solution/2300-2399/2336.Smallest Number in Infinite Set/Solution.py
{ "start": 0, "end": 415 }
class ____: def __init__(self): self.s = SortedSet(range(1, 1001)) def popSmallest(self) -> int: x = self.s[0] self.s.remove(x) return x def addBack(self, num: int) -> None: self.s.add(num) # Your SmallestInfiniteSet object will be instantiated and called as such: # obj = SmallestInfiniteSet() # param_1 = obj.popSmallest() # obj.addBack(num)
SmallestInfiniteSet
python
pytorch__pytorch
benchmarks/transformer/attention_bias_benchmarks.py
{ "start": 1485, "end": 2078 }
class ____: config: ExperimentConfig results: ExperimentResults def get_entries(self) -> list: return self.config.get_entries() + self.results.get_entries() def generate_inputs( batch_size, q_sequence_length, kv_sequence_length, embed_dim, dtype, device ): q_shape = (batch_size, q_sequence_length, embed_dim) kv_shape = (batch_size, kv_sequence_length, embed_dim) make_q = partial(torch.rand, q_shape, device=device, dtype=dtype) make_kv = partial(torch.rand, kv_shape, device=device, dtype=dtype) return make_q(), make_kv(), make_kv()
Experiment
python
huggingface__transformers
examples/pytorch/question-answering/run_qa.py
{ "start": 3497, "end": 31732 }
class ____: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to evaluate the perplexity on (a text file)."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_seq_length: int = field( default=384, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) pad_to_max_length: bool = field( default=True, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when" " batching to the maximum length in the batch (which can be faster on GPU but will be slower on TPU)." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." 
) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) version_2_with_negative: bool = field( default=False, metadata={"help": "If true, some of the examples do not have an answer."} ) null_score_diff_threshold: float = field( default=0.0, metadata={ "help": ( "The threshold used to select the null answer: if the best answer has a score that is less than " "the score of the null answer minus this threshold, the null answer is selected for this example. " "Only useful when `version_2_with_negative=True`." ) }, ) doc_stride: int = field( default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."}, ) n_best_size: int = field( default=20, metadata={"help": "The total number of n-best predictions to generate when looking for an answer."}, ) max_answer_length: int = field( default=30, metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) }, ) def __post_init__(self): if ( self.dataset_name is None and self.train_file is None and self.validation_file is None and self.test_file is None ): raise ValueError("Need either a dataset name or a training/validation file/test_file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if self.test_file is not None: extension = self.test_file.split(".")[-1] assert extension in ["csv", "json"], "`test_file` should be a csv or a json file." 
def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. 
set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir, token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) model = AutoModelForQuestionAnswering.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # Tokenizer check: this script requires a fast tokenizer. # Check if tokenizer has _tokenizer attribute (from tokenizers library) or is_fast property if not (hasattr(tokenizer, "_tokenizer") or getattr(tokenizer, "is_fast", False)): raise TypeError( "This example script only works for models that have a fast tokenizer. Check out the big table of models at" " https://huggingface.co/transformers/index.html#supported-frameworks to find the model types that meet" " this requirement" ) # Preprocessing the datasets. # Preprocessing is slightly different for training and evaluation. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: column_names = raw_datasets["validation"].column_names else: column_names = raw_datasets["test"].column_names question_column_name = "question" if "question" in column_names else column_names[0] context_column_name = "context" if "context" in column_names else column_names[1] answer_column_name = "answers" if "answers" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). 
pad_on_right = tokenizer.padding_side == "right" if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) # Training preprocessing def prepare_train_features(examples): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length" if data_args.pad_to_max_length else False, ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # The offset mappings will give us a map from token to character position in the original context. This will # help us compute the start_positions and end_positions. offset_mapping = tokenized_examples.pop("offset_mapping") # Let's label those examples! 
tokenized_examples["start_positions"] = [] tokenized_examples["end_positions"] = [] for i, offsets in enumerate(offset_mapping): # We will label impossible answers with the index of the CLS token. input_ids = tokenized_examples["input_ids"][i] if tokenizer.cls_token_id in input_ids: cls_index = input_ids.index(tokenizer.cls_token_id) elif tokenizer.bos_token_id in input_ids: cls_index = input_ids.index(tokenizer.bos_token_id) else: cls_index = 0 # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples.sequence_ids(i) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] answers = examples[answer_column_name][sample_index] # If no answers are given, set the cls_index as answer. if len(answers["answer_start"]) == 0: tokenized_examples["start_positions"].append(cls_index) tokenized_examples["end_positions"].append(cls_index) else: # Start/end character index of the answer in the text. start_char = answers["answer_start"][0] end_char = start_char + len(answers["text"][0]) # Start token index of the current span in the text. token_start_index = 0 while sequence_ids[token_start_index] != (1 if pad_on_right else 0): token_start_index += 1 # End token index of the current span in the text. token_end_index = len(input_ids) - 1 while sequence_ids[token_end_index] != (1 if pad_on_right else 0): token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): tokenized_examples["start_positions"].append(cls_index) tokenized_examples["end_positions"].append(cls_index) else: # Otherwise move the token_start_index and token_end_index to the two ends of the answer. # Note: we could go after the last offset if the answer is the last word (edge case). 
while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char: token_start_index += 1 tokenized_examples["start_positions"].append(token_start_index - 1) while offsets[token_end_index][1] >= end_char: token_end_index -= 1 tokenized_examples["end_positions"].append(token_end_index + 1) return tokenized_examples if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: # We will select sample from whole data if argument is specified max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) # Create train feature from dataset with training_args.main_process_first(desc="train dataset map pre-processing"): train_dataset = train_dataset.map( prepare_train_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) if data_args.max_train_samples is not None: # Number of samples might increase during Feature Creation, We select only specified max samples max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) # Validation preprocessing def prepare_validation_features(examples): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. 
This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length" if data_args.pad_to_max_length else False, ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. tokenized_examples["example_id"] = [] for i in range(len(tokenized_examples["input_ids"])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples.sequence_ids(i) context_index = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples["example_id"].append(examples["id"][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. 
tokenized_examples["offset_mapping"][i] = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["offset_mapping"][i]) ] return tokenized_examples if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_examples = raw_datasets["validation"] if data_args.max_eval_samples is not None: # We will select sample from whole data max_eval_samples = min(len(eval_examples), data_args.max_eval_samples) eval_examples = eval_examples.select(range(max_eval_samples)) # Validation Feature Creation with training_args.main_process_first(desc="validation dataset map pre-processing"): eval_dataset = eval_examples.map( prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) if data_args.max_eval_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) if training_args.do_predict: if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_examples = raw_datasets["test"] if data_args.max_predict_samples is not None: # We will select sample from whole data predict_examples = predict_examples.select(range(data_args.max_predict_samples)) # Predict Feature Creation with training_args.main_process_first(desc="prediction dataset map pre-processing"): predict_dataset = predict_examples.map( prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", ) if data_args.max_predict_samples is not None: # During Feature creation dataset samples might 
increase, we will select required samples again max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) # Data collator # We have already padded to max length if the corresponding flag is True, otherwise we need to pad in the data # collator. data_collator = ( default_data_collator if data_args.pad_to_max_length else DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None) ) # Post-processing: def post_processing_function(examples, features, predictions, stage="eval"): # Post-processing: we match the start logits and end logits to answers in the original context. predictions = postprocess_qa_predictions( examples=examples, features=features, predictions=predictions, version_2_with_negative=data_args.version_2_with_negative, n_best_size=data_args.n_best_size, max_answer_length=data_args.max_answer_length, null_score_diff_threshold=data_args.null_score_diff_threshold, output_dir=training_args.output_dir, log_level=log_level, prefix=stage, ) # Format the result to the format the metric expects. 
if data_args.version_2_with_negative: formatted_predictions = [ {"id": str(k), "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items() ] else: formatted_predictions = [{"id": str(k), "prediction_text": v} for k, v in predictions.items()] references = [{"id": str(ex["id"]), "answers": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=formatted_predictions, label_ids=references) if data_args.version_2_with_negative: accepted_best_metrics = ("exact", "f1", "HasAns_exact", "HasAns_f1") else: accepted_best_metrics = ("exact_match", "f1") if training_args.load_best_model_at_end and training_args.metric_for_best_model not in accepted_best_metrics: warnings.warn(f"--metric_for_best_model should be set to one of {accepted_best_metrics}") metric = evaluate.load( "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir ) def compute_metrics(p: EvalPrediction): return metric.compute(predictions=p.predictions, references=p.label_ids) # Initialize our Trainer trainer = QuestionAnsweringTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, eval_examples=eval_examples if training_args.do_eval else None, processing_class=tokenizer, data_collator=data_collator, post_process_function=post_processing_function, compute_metrics=compute_metrics, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) 
trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Prediction if training_args.do_predict: logger.info("*** Predict ***") results = trainer.predict(predict_dataset, predict_examples) metrics = results.metrics max_predict_samples = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) ) metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"} if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset_args"] = data_args.dataset_config_name kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
DataTrainingArguments
python
sympy__sympy
sympy/integrals/manualintegrate.py
{ "start": 4882, "end": 5237 }
class ____(Rule): """integrate(f(x) + g(x), x) -> integrate(f(x), x) + integrate(g(x), x)""" substeps: list[Rule] def eval(self) -> Expr: return Add(*(substep.eval() for substep in self.substeps)) def contains_dont_know(self) -> bool: return any(substep.contains_dont_know() for substep in self.substeps) @dataclass
AddRule
python
neetcode-gh__leetcode
python/2405-optimal-partition-of-string.py
{ "start": 0, "end": 227 }
class ____: def partitionString(self, s: str) -> int: c=0 res=set() for i in s: if i in res: c=c+1 res=set() res.add(i) return c+1
Solution
python
weaviate__weaviate-python-client
weaviate/collections/queries/near_media/query/executor.py
{ "start": 946, "end": 16505 }
class ____( Generic[ConnectionType, Properties, References], _BaseExecutor[ConnectionType] ): @overload def near_media( self, media: BLOB_INPUT, media_type: NearMediaType, *, certainty: Optional[NUMBER] = None, distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: Literal[None] = None, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Union[PROPERTIES, bool, None] = None, return_references: Literal[None] = None, ) -> executor.Result[QueryReturn[Properties, References]]: ... @overload def near_media( self, media: BLOB_INPUT, media_type: NearMediaType, *, certainty: Optional[NUMBER] = None, distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: Literal[None] = None, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Union[PROPERTIES, bool, None] = None, return_references: REFERENCES, ) -> executor.Result[QueryReturn[Properties, CrossReferences]]: ... 
@overload def near_media( self, media: BLOB_INPUT, media_type: NearMediaType, *, certainty: Optional[NUMBER] = None, distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: Literal[None] = None, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Union[PROPERTIES, bool, None] = None, return_references: Type[TReferences], ) -> executor.Result[QueryReturn[Properties, TReferences]]: ... @overload def near_media( self, media: BLOB_INPUT, media_type: NearMediaType, *, certainty: Optional[NUMBER] = None, distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: Literal[None] = None, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Type[TProperties], return_references: Literal[None] = None, ) -> executor.Result[QueryReturn[TProperties, References]]: ... @overload def near_media( self, media: BLOB_INPUT, media_type: NearMediaType, *, certainty: Optional[NUMBER] = None, distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: Literal[None] = None, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Type[TProperties], return_references: REFERENCES, ) -> executor.Result[QueryReturn[TProperties, CrossReferences]]: ... 
@overload def near_media( self, media: BLOB_INPUT, media_type: NearMediaType, *, certainty: Optional[NUMBER] = None, distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: Literal[None] = None, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Type[TProperties], return_references: Type[TReferences], ) -> executor.Result[QueryReturn[TProperties, TReferences]]: ... ### GroupBy ### @overload def near_media( self, media: BLOB_INPUT, media_type: NearMediaType, *, certainty: Optional[NUMBER] = None, distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: GroupBy, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Union[PROPERTIES, bool, None] = None, return_references: Literal[None] = None, ) -> executor.Result[GroupByReturn[Properties, References]]: ... @overload def near_media( self, media: BLOB_INPUT, media_type: NearMediaType, *, certainty: Optional[NUMBER] = None, distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: GroupBy, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Union[PROPERTIES, bool, None] = None, return_references: REFERENCES, ) -> executor.Result[GroupByReturn[Properties, CrossReferences]]: ... 
@overload def near_media( self, media: BLOB_INPUT, media_type: NearMediaType, *, certainty: Optional[NUMBER] = None, distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: GroupBy, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Union[PROPERTIES, bool, None] = None, return_references: Type[TReferences], ) -> executor.Result[GroupByReturn[Properties, TReferences]]: ... @overload def near_media( self, media: BLOB_INPUT, media_type: NearMediaType, *, certainty: Optional[NUMBER] = None, distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: GroupBy, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Type[TProperties], return_references: Literal[None] = None, ) -> executor.Result[GroupByReturn[TProperties, References]]: ... @overload def near_media( self, media: BLOB_INPUT, media_type: NearMediaType, *, certainty: Optional[NUMBER] = None, distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: GroupBy, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Type[TProperties], return_references: REFERENCES, ) -> executor.Result[GroupByReturn[TProperties, CrossReferences]]: ... 
@overload def near_media( self, media: BLOB_INPUT, media_type: NearMediaType, *, certainty: Optional[NUMBER] = None, distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: GroupBy, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Type[TProperties], return_references: Type[TReferences], ) -> executor.Result[GroupByReturn[TProperties, TReferences]]: ... ### DEFAULT ### @overload def near_media( self, media: BLOB_INPUT, media_type: NearMediaType, *, certainty: Optional[NUMBER] = None, distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: Optional[GroupBy] = None, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Optional[ReturnProperties[TProperties]] = None, return_references: Optional[ReturnReferences[TReferences]] = None, ) -> executor.Result[ QuerySearchReturnType[Properties, References, TProperties, TReferences] ]: ... 
def near_media( self, media: BLOB_INPUT, media_type: NearMediaType, *, certainty: Optional[NUMBER] = None, distance: Optional[NUMBER] = None, limit: Optional[int] = None, offset: Optional[int] = None, auto_limit: Optional[int] = None, filters: Optional[_Filters] = None, group_by: Optional[GroupBy] = None, rerank: Optional[Rerank] = None, target_vector: Optional[TargetVectorJoinType] = None, include_vector: INCLUDE_VECTOR = False, return_metadata: Optional[METADATA] = None, return_properties: Optional[ReturnProperties[TProperties]] = None, return_references: Optional[ReturnReferences[TReferences]] = None, ) -> executor.Result[QuerySearchReturnType[Properties, References, TProperties, TReferences]]: """Search for objects by audio in this collection using an audio-capable vectorization module and vector-based similarity search. See the [docs](https://weaviate.io/developers/weaviate/modules/retriever-vectorizer-modules/multi2vec-bind) for a more detailed explanation. NOTE: You must have a multi-media-capable vectorization module installed in order to use this method, e.g. `multi2vec-bind`. Args: media: The media file to search on, REQUIRED. This can be a base64 encoded string of the binary, a path to the file, or a file-like object. media_type: The type of the provided media file, REQUIRED. certainty: The minimum similarity score to return. If not specified, the default certainty specified by the server is used. distance: The maximum distance to search. If not specified, the default distance specified by the server is used. limit: The maximum number of results to return. If not specified, the default limit specified by the server is returned. offset: The offset to start from. If not specified, the retrieval begins from the first object in the server. auto_limit: The maximum number of [autocut](https://weaviate.io/developers/weaviate/api/graphql/additional-operators#autocut) results to return. If not specified, no limit is applied. 
filters: The filters to apply to the search. group_by: How the results should be grouped by a specific property. rerank: How the results should be reranked. NOTE: A `rerank-*` module must be enabled for this functionality to work. target_vector: The name of the vector space to search in for named vector configurations. Required if multiple spaces are configured. include_vector: Whether to include the vector in the results. If not specified, this is set to False. return_metadata: The metadata to return for each object, defaults to `None`. return_properties: The properties to return for each object. return_references: The references to return for each object. NOTE: - If `return_properties` is not provided then all properties are returned except for blob properties. - If `return_metadata` is not provided then no metadata is provided. Use MetadataQuery.full() to retrieve all metadata. - If `return_references` is not provided then no references are provided. Returns: A `QueryReturn` or `GroupByReturn` object that includes the searched objects. If `group_by` is provided then a `GroupByReturn` object is returned, otherwise a `QueryReturn` object is returned. Raises: weaviate.exceptions.WeaviateQueryError: If the request to the Weaviate server fails. 
""" def resp( res: search_get_pb2.SearchReply, ) -> QuerySearchReturnType[Properties, References, TProperties, TReferences]: return cast( Any, self._result_to_query_or_groupby_return( res, _QueryOptions.from_input( return_metadata, return_properties, include_vector, self._references, return_references, rerank, group_by, ), ), ) request = self._query.near_media( media=parse_blob(media), type_=media_type.value, certainty=certainty, distance=distance, filters=filters, group_by=_GroupBy.from_input(group_by), rerank=rerank, target_vector=target_vector, limit=limit, offset=offset, autocut=auto_limit, return_metadata=self._parse_return_metadata(return_metadata, include_vector), return_properties=self._parse_return_properties(return_properties), return_references=self._parse_return_references(return_references), ) return executor.execute( response_callback=resp, method=self._connection.grpc_search, request=request, )
_NearMediaQueryExecutor
python
doocs__leetcode
solution/2800-2899/2839.Check if Strings Can be Made Equal With Operations I/Solution.py
{ "start": 0, "end": 181 }
class ____: def canBeEqual(self, s1: str, s2: str) -> bool: return sorted(s1[::2]) == sorted(s2[::2]) and sorted(s1[1::2]) == sorted( s2[1::2] )
Solution
python
pandas-dev__pandas
asv_bench/benchmarks/frame_methods.py
{ "start": 11367, "end": 12940 }
class ____: params = ( [True, False], [ "float64", "float32", "object", "Int64", "Float64", "datetime64[ns]", "datetime64[ns, tz]", "timedelta64[ns]", ], ) param_names = ["inplace", "dtype"] def setup(self, inplace, dtype): N, M = 10000, 100 if dtype in ("datetime64[ns]", "datetime64[ns, tz]", "timedelta64[ns]"): data = { "datetime64[ns]": date_range("2011-01-01", freq="h", periods=N), "datetime64[ns, tz]": date_range( "2011-01-01", freq="h", periods=N, tz="Asia/Tokyo" ), "timedelta64[ns]": timedelta_range(start="1 day", periods=N, freq="1D"), } self.df = DataFrame({f"col_{i}": data[dtype] for i in range(M)}) self.df[::2] = None else: values = np.random.randn(N, M) values[::2] = np.nan if dtype == "Int64": values = values.round() values = values.astype(object) values[::2] = NA self.df = DataFrame(values, dtype=dtype) self.fill_values = self.df.iloc[self.df.first_valid_index()].to_dict() def time_fillna(self, inplace, dtype): self.df.fillna(value=self.fill_values, inplace=inplace) def time_ffill(self, inplace, dtype): self.df.ffill(inplace=inplace) def time_bfill(self, inplace, dtype): self.df.bfill(inplace=inplace)
Fillna
python
pennersr__django-allauth
allauth/socialaccount/providers/flickr/views.py
{ "start": 760, "end": 1476 }
class ____(OAuthAdapter): provider_id = "flickr" request_token_url = "https://www.flickr.com/services/oauth/request_token" # nosec access_token_url = "https://www.flickr.com/services/oauth/access_token" # nosec authorize_url = "https://www.flickr.com/services/oauth/authorize" def complete_login(self, request, app, token, response): client = FlickrAPI(request, app.client_id, app.secret, self.request_token_url) extra_data = client.get_user_info() return self.get_provider().sociallogin_from_response(request, extra_data) oauth_login = OAuthLoginView.adapter_view(FlickrOAuthAdapter) oauth_callback = OAuthCallbackView.adapter_view(FlickrOAuthAdapter)
FlickrOAuthAdapter
python
dagster-io__dagster
python_modules/dagster/dagster_tests/storage_tests/utils/compute_log_manager.py
{ "start": 311, "end": 9651 }
class ____: """You can extend this class to easily run these set of tests on any compute log manager. When extending, you simply need to override the `compute_log_manager` fixture and return your implementation of `ComputeLogManager`. For example: ``` class TestMyComputeLogManagerImplementation(TestComputeLogManager): __test__ = True @pytest.fixture(scope='function', name='compute_log_manager') def compute_log_manager(self): return MyComputeLogManagerImplementation() ``` """ __test__ = False @pytest.fixture(name="compute_log_manager") def compute_log_manager(self): yield @pytest.fixture(name="write_manager") def write_manager(self): yield @pytest.fixture(name="read_manager") def read_manager(self): yield @pytest.mark.skipif( should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+" ) def test_capture(self, compute_log_manager): now = get_current_datetime() log_key = ["arbitrary", "log", "key", now.strftime("%Y_%m_%d__%H_%M_%S")] with compute_log_manager.capture_logs(log_key) as context: print("HELLO WORLD") # noqa: T201 print("HELLO ERROR", file=sys.stderr) # noqa: T201 assert not compute_log_manager.is_capture_complete(log_key) assert context.log_key == log_key assert compute_log_manager.is_capture_complete(log_key) log_data = compute_log_manager.get_log_data(log_key) assert log_data.stdout == b"HELLO WORLD\n" assert log_data.stderr == b"HELLO ERROR\n" assert log_data.cursor log_metadata = compute_log_manager.get_log_metadata(log_key) assert log_metadata.stdout_location assert log_metadata.stderr_location assert log_metadata.stdout_download_url assert log_metadata.stderr_download_url @pytest.mark.skipif( should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+" ) def test_long_key(self, compute_log_manager): log_key = ["".join(random.choice(string.ascii_lowercase) for x in range(300))] with compute_log_manager.capture_logs(log_key) as context: print("HELLO WORLD") # noqa: T201 print("HELLO ERROR", 
file=sys.stderr) # noqa: T201 assert not compute_log_manager.is_capture_complete(log_key) assert context.log_key == log_key assert compute_log_manager.is_capture_complete(log_key) log_data = compute_log_manager.get_log_data(log_key) assert log_data.stdout == b"HELLO WORLD\n" assert log_data.stderr == b"HELLO ERROR\n" assert log_data.cursor log_metadata = compute_log_manager.get_log_metadata(log_key) assert log_metadata.stdout_location assert log_metadata.stderr_location assert log_metadata.stdout_download_url assert log_metadata.stderr_download_url @pytest.mark.skipif( should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+" ) def test_streaming(self, write_manager, read_manager): from dagster._core.storage.cloud_storage_compute_log_manager import ( CloudStorageComputeLogManager, ) if ( not isinstance(write_manager, CloudStorageComputeLogManager) or not isinstance(read_manager, CloudStorageComputeLogManager) or not write_manager.upload_interval ): pytest.skip("does not support streaming") now = get_current_datetime() log_key = ["streaming", "log", "key", now.strftime("%Y_%m_%d__%H_%M_%S")] with write_manager.capture_logs(log_key): print("hello stdout") # noqa: T201 print("hello stderr", file=sys.stderr) # noqa: T201 # read before the write manager has a chance to upload partial results log_data = read_manager.get_log_data(log_key) assert not log_data.stdout assert not log_data.stderr # wait past the upload interval and then read again time.sleep(write_manager.upload_interval + 1) log_data = read_manager.get_log_data(log_key) # print('WTF', log_data.stdout) assert log_data.stdout == b"hello stdout\n" assert log_data.stderr == b"hello stderr\n" # check the cloud storage directly that only partial keys have been uploaded assert not read_manager.cloud_storage_has_logs(log_key, ComputeIOType.STDOUT) assert not read_manager.cloud_storage_has_logs(log_key, ComputeIOType.STDOUT) assert read_manager.cloud_storage_has_logs(log_key, 
ComputeIOType.STDERR, partial=True) assert read_manager.cloud_storage_has_logs(log_key, ComputeIOType.STDERR, partial=True) def test_truncation(self, write_manager, read_manager): from dagster._core.storage.cloud_storage_compute_log_manager import ( TruncatingCloudStorageComputeLogManager, ) if not isinstance(write_manager, TruncatingCloudStorageComputeLogManager) or not isinstance( read_manager, TruncatingCloudStorageComputeLogManager ): pytest.skip("does not support truncation") with environ({"DAGSTER_TRUNCATE_COMPUTE_LOGS_UPLOAD_BYTES": "5"}): now = get_current_datetime() log_key = ["truncating", "log", "key", now.strftime("%Y_%m_%d__%H_%M_%S")] with write_manager.capture_logs(log_key): print("hello stdout") # noqa: T201 print("hello stderr", file=sys.stderr) # noqa: T201 log_data = read_manager.get_log_data(log_key) assert log_data.stdout == b"hello" assert log_data.stderr == b"hello" @pytest.mark.skipif( should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+" ) def test_complete_checks(self, write_manager, read_manager): from dagster._core.storage.cloud_storage_compute_log_manager import ( CloudStorageComputeLogManager, ) if not isinstance(write_manager, CloudStorageComputeLogManager) or not isinstance( read_manager, CloudStorageComputeLogManager ): pytest.skip("unnecessary check since write/read manager should have the same behavior") now = get_current_datetime() log_key = ["complete", "test", "log", "key", now.strftime("%Y_%m_%d__%H_%M_%S")] with write_manager.capture_logs(log_key): print("hello stdout") # noqa: T201 print("hello stderr", file=sys.stderr) # noqa: T201 assert not write_manager.is_capture_complete(log_key) assert not read_manager.is_capture_complete(log_key) assert write_manager.is_capture_complete(log_key) assert read_manager.is_capture_complete(log_key) def test_log_stream(self, compute_log_manager): log_key = ["some", "log", "key"] with compute_log_manager.open_log_stream(log_key, ComputeIOType.STDOUT) as 
write_stream: write_stream.write("hello hello") log_data = compute_log_manager.get_log_data(log_key) assert log_data.stdout == b"hello hello" def test_delete_logs(self, compute_log_manager): log_key = ["some", "log", "key"] other_log_key = ["other", "log", "key"] with compute_log_manager.open_log_stream(log_key, ComputeIOType.STDOUT) as write_stream: write_stream.write("hello hello") with compute_log_manager.open_log_stream( other_log_key, ComputeIOType.STDOUT ) as write_stream: write_stream.write("hello hello") log_data = compute_log_manager.get_log_data(log_key) assert log_data.stdout == b"hello hello" other_log_data = compute_log_manager.get_log_data(other_log_key) assert other_log_data.stdout == b"hello hello" compute_log_manager.delete_logs(log_key=log_key) log_data = compute_log_manager.get_log_data(log_key) assert log_data.stdout is None other_log_data = compute_log_manager.get_log_data(other_log_key) assert other_log_data.stdout == b"hello hello" def test_delete_log_prefix(self, compute_log_manager): log_key = ["some", "log", "key"] other_log_key = ["some", "log", "other_key"] with compute_log_manager.open_log_stream(log_key, ComputeIOType.STDOUT) as write_stream: write_stream.write("hello hello") with compute_log_manager.open_log_stream( other_log_key, ComputeIOType.STDOUT ) as write_stream: write_stream.write("hello hello") log_data = compute_log_manager.get_log_data(log_key) assert log_data.stdout == b"hello hello" other_log_data = compute_log_manager.get_log_data(other_log_key) assert other_log_data.stdout == b"hello hello" compute_log_manager.delete_logs(prefix=["some", "log"]) log_data = compute_log_manager.get_log_data(log_key) assert log_data.stdout is None other_log_data = compute_log_manager.get_log_data(other_log_key) assert other_log_data.stdout is None
TestComputeLogManager
python
apache__airflow
providers/opensearch/tests/unit/opensearch/hooks/test_opensearch.py
{ "start": 1510, "end": 4397 }
class ____: def test_hook_search(self, mock_hook): hook = OpenSearchHook(open_search_conn_id="opensearch_default", log_query=True) result = hook.search( index_name="testIndex", query={"size": 1, "query": {"multi_match": {"query": "test", "fields": ["testField"]}}}, ) assert result == MOCK_SEARCH_RETURN def test_hook_index(self, mock_hook): hook = OpenSearchHook(open_search_conn_id="opensearch_default", log_query=True) result = hook.index(index_name="test_index", document={"title": "Monty Python"}, doc_id=3) assert result == 3 def test_delete_check_parameters(self): hook = OpenSearchHook(open_search_conn_id="opensearch_default", log_query=True) with pytest.raises(AirflowException, match="must include one of either a query or a document id"): hook.delete(index_name="test_index") @mock.patch(f"{BASEHOOK_PATCH_PATH}.get_connection") def test_hook_param_bool(self, mock_get_connection): mock_conn = Connection( conn_id="opensearch_default", extra={"use_ssl": "True", "verify_certs": "True"} ) mock_get_connection.return_value = mock_conn hook = OpenSearchHook(open_search_conn_id="opensearch_default", log_query=True) assert isinstance(hook.use_ssl, bool) assert isinstance(hook.verify_certs, bool) def test_load_conn_param(self, mock_hook): hook_default = OpenSearchHook(open_search_conn_id="opensearch_default", log_query=True) assert hook_default.connection_class == DEFAULT_CONN hook_Urllib3 = OpenSearchHook( open_search_conn_id="opensearch_default", log_query=True, open_search_conn_class=Urllib3HttpConnection, ) assert hook_Urllib3.connection_class == Urllib3HttpConnection def test_hook_with_auth(self, monkeypatch): monkeypatch.setenv( "AIRFLOW_CONN_OPENSEARCH_DEFAULT", json.dumps( { "conn_type": "opensearch hook", "host": "testhost", "login": "testuser", "password": "testpass", } ), ) hook = OpenSearchHook(open_search_conn_id="opensearch_default", log_query=True) assert hook.client.transport.kwargs["http_auth"] == ("testuser", "testpass") def test_hook_no_auth(self, 
monkeypatch): monkeypatch.setenv( "AIRFLOW_CONN_OPENSEARCH_DEFAULT", json.dumps( { "conn_type": "opensearch hook", "host": "testhost", } ), ) hook = OpenSearchHook(open_search_conn_id="opensearch_default", log_query=True) assert "http_auth" not in hook.client.transport.kwargs
TestOpenSearchHook
python
dagster-io__dagster
python_modules/dagster/dagster/_core/remote_representation/external_data.py
{ "start": 6970, "end": 7052 }
class ____(Enum): RESOURCE = "RESOURCE" @whitelist_for_serdes
EnvVarConsumerType
python
pytorch__pytorch
test/test_xpu.py
{ "start": 24242, "end": 26435 }
class ____(TestCase): @suppress_warnings @ops(_xpu_computation_ops, dtypes=any_common_cpu_xpu_one) def test_compare_cpu(self, device, dtype, op): def to_cpu(arg): if isinstance(arg, torch.Tensor): return arg.to(device="cpu") return arg samples = op.reference_inputs(device, dtype) for sample in samples: cpu_sample = sample.transform(to_cpu) xpu_results = op(sample.input, *sample.args, **sample.kwargs) cpu_results = op(cpu_sample.input, *cpu_sample.args, **cpu_sample.kwargs) xpu_results = sample.output_process_fn_grad(xpu_results) cpu_results = cpu_sample.output_process_fn_grad(cpu_results) # Lower tolerance because we are running this as a `@slowTest` # Don't want the periodic tests to fail frequently self.assertEqual(xpu_results, cpu_results, atol=1e-4, rtol=1e-4) @ops(_xpu_computation_ops, allowed_dtypes=(torch.bool,)) def test_non_standard_bool_values(self, device, dtype, op): # Test boolean values other than 0x00 and 0x01 (gh-54789) def convert_boolean_tensors(x): if not isinstance(x, torch.Tensor) or x.dtype != torch.bool: return x # Map False -> 0 and True -> Random value in [2, 255] true_vals = torch.randint( 2, 255, x.shape, dtype=torch.uint8, device=x.device ) false_vals = torch.zeros((), dtype=torch.uint8, device=x.device) x_int = torch.where(x, true_vals, false_vals) ret = x_int.view(torch.bool) self.assertEqual(ret, x) return ret for sample in op.sample_inputs(device, dtype): expect = op(sample.input, *sample.args, **sample.kwargs) transformed = sample.transform(convert_boolean_tensors) actual = op(transformed.input, *transformed.args, **transformed.kwargs) self.assertEqual(expect, actual) instantiate_device_type_tests(TestXpuOps, globals(), only_for="xpu", allow_xpu=True) @unittest.skipIf(not TEST_XPU, "XPU not available, skipping tests")
TestXpuOps
python
pydantic__pydantic
pydantic-core/python/pydantic_core/core_schema.py
{ "start": 12704, "end": 13847 }
class ____(Protocol): # pragma: no cover def __call__(self, input_value: Any, index_key: int | str | None = None, /) -> Any: ... # (input_value: Any, serializer: SerializerFunctionWrapHandler, /) -> Any GeneralWrapNoInfoSerializerFunction = Callable[[Any, SerializerFunctionWrapHandler], Any] # (input_value: Any, serializer: SerializerFunctionWrapHandler, info: SerializationInfo, /) -> Any GeneralWrapInfoSerializerFunction = Callable[[Any, SerializerFunctionWrapHandler, SerializationInfo[Any]], Any] # (model: Any, input_value: Any, serializer: SerializerFunctionWrapHandler, /) -> Any FieldWrapNoInfoSerializerFunction = Callable[[Any, Any, SerializerFunctionWrapHandler], Any] # (model: Any, input_value: Any, serializer: SerializerFunctionWrapHandler, info: FieldSerializationInfo, /) -> Any FieldWrapInfoSerializerFunction = Callable[[Any, Any, SerializerFunctionWrapHandler, FieldSerializationInfo[Any]], Any] WrapSerializerFunction = Union[ GeneralWrapNoInfoSerializerFunction, GeneralWrapInfoSerializerFunction, FieldWrapNoInfoSerializerFunction, FieldWrapInfoSerializerFunction, ]
SerializerFunctionWrapHandler
python
sqlalchemy__sqlalchemy
test/orm/declarative/test_reflection.py
{ "start": 663, "end": 975 }
class ____(fixtures.TablesTest): __requires__ = ("reflectable_autoincrement",) def setup_test(self): global Base, registry registry = decl.registry(metadata=MetaData()) Base = registry.generate_base() def teardown_test(self): clear_mappers()
DeclarativeReflectionBase
python
getsentry__sentry
tests/sentry/api/endpoints/secret_scanning/test_github.py
{ "start": 562, "end": 7707 }
class ____(TestCase): path = reverse("sentry-api-0-secret-scanning-github") def test_invalid_content_type(self) -> None: response = self.client.post(self.path, content_type="application/x-www-form-urlencoded") assert response.status_code == 400 assert response.content == b'{"details":"invalid content type specified"}' def test_invalid_signature(self) -> None: response = self.client.post(self.path, content_type="application/json") assert response.status_code == 400 assert response.content == b'{"details":"invalid signature"}' @override_options({"secret-scanning.github.enable-signature-verification": False}) def test_false_positive(self) -> None: payload = [ { "source": "commit", "token": "some_token", "type": "some_type", "url": "https://example.com/base-repo-url/", } ] response = self.client.post(self.path, content_type="application/json", data=payload) assert response.status_code == 200 assert ( response.content == b'[{"token_hash":"9a45520a1213f15016d2d768b5fb3d904492a44ee274b44d4de8803e00fb536a","token_type":"some_type","label":"false_positive"}]' ) @override_options({"secret-scanning.github.enable-signature-verification": False}) def test_false_positive_deactivated_user_token(self) -> None: user = self.create_user() token = ApiToken.objects.create(user=user, name="test user token", scope_list=[]) # revoke token token.delete() payload = [ { "source": "commit", "token": str(token), "type": "sentry_user_auth_token", "url": "https://example.com/base-repo-url/", } ] with self.tasks(): response = self.client.post(self.path, content_type="application/json", data=payload) assert response.status_code == 200 expected = [ { "token_hash": hash_token(str(token)), "token_type": "sentry_user_auth_token", "label": "false_positive", } ] assert json.loads(response.content.decode("utf-8")) == expected assert len(mail.outbox) == 0 @override_options({"secret-scanning.github.enable-signature-verification": False}) def test_false_positive_deactivated_org_token(self) -> None: 
token_str = generate_token("test-org", "https://test-region.sentry.io") hash_digest = hash_token(token_str) token = OrgAuthToken.objects.create( organization_id=self.organization.id, name="test org token", scope_list=["org:ci"], token_hashed=hash_digest, ) # revoke token token.update(date_deactivated=timezone.now()) payload = [ { "source": "commit", "token": token_str, "type": "sentry_org_auth_token", "url": "https://example.com/base-repo-url/", } ] with self.tasks(): response = self.client.post(self.path, content_type="application/json", data=payload) assert response.status_code == 200 expected = [ { "token_hash": hash_digest, "token_type": "sentry_org_auth_token", "label": "false_positive", } ] assert json.loads(response.content.decode("utf-8")) == expected assert len(mail.outbox) == 0 @override_options({"secret-scanning.github.enable-signature-verification": False}) @patch("sentry.api.endpoints.secret_scanning.github.logger") def test_true_positive_user_token(self, mock_logger: MagicMock) -> None: user = self.create_user() token = ApiToken.objects.create(user=user, name="test user token", scope_list=[]) payload = [ { "source": "commit", "token": str(token.token), "type": "sentry_user_auth_token", "url": "https://example.com/base-repo-url/", } ] with self.tasks(): response = self.client.post(self.path, content_type="application/json", data=payload) assert response.status_code == 200 assert response.content == b"[]" extra = { "exposed_source": "commit", "exposed_url": "https://example.com/base-repo-url/", "hashed_token": token.hashed_token, "token_type": AuthTokenType.USER, } mock_logger.info.assert_called_with("found an exposed auth token", extra=extra) assert len(mail.outbox) == 1 assert mail.outbox[0].to == [user.username] assert mail.outbox[0].subject == "[Sentry]Action Required: User Auth Token Exposed" assert ( "Your Sentry User Auth Token was found publicly on the internet" in mail.outbox[0].body ) assert "http://testserver/settings/account/api/auth-tokens" 
in mail.outbox[0].body assert "test user token" in mail.outbox[0].body assert token.hashed_token in mail.outbox[0].body @override_options({"secret-scanning.github.enable-signature-verification": False}) @patch("sentry.api.endpoints.secret_scanning.github.logger") def test_true_positive_org_token(self, mock_logger: MagicMock) -> None: token_str = generate_token("test-org", "https://test-region.sentry.io") token = OrgAuthToken.objects.create( organization_id=self.organization.id, name="test org token", scope_list=["org:ci"], token_hashed=hash_token(token_str), ) payload = [ { "source": "commit", "token": token_str, "type": "sentry_org_auth_token", "url": "https://example.com/base-repo-url/", } ] with self.tasks(): response = self.client.post(self.path, content_type="application/json", data=payload) assert response.status_code == 200 assert response.content == b"[]" extra = { "exposed_source": "commit", "exposed_url": "https://example.com/base-repo-url/", "hashed_token": token.token_hashed, "token_type": AuthTokenType.ORG, } mock_logger.info.assert_called_with("found an exposed auth token", extra=extra) assert len(mail.outbox) == 1 assert mail.outbox[0].to == [self.user.username] assert mail.outbox[0].subject == "[Sentry]Action Required: Organization Auth Token Exposed" assert ( "Your Sentry Organization Auth Token was found publicly on the internet" in mail.outbox[0].body ) assert "http://baz.testserver/settings/auth-tokens/" in mail.outbox[0].body assert "test org token" in mail.outbox[0].body assert token.token_hashed in mail.outbox[0].body
SecretScanningGitHubTest
python
PyCQA__pylint
pylint/message/message_id_store.py
{ "start": 579, "end": 6370 }
class ____: """The MessageIdStore store MessageId and make sure that there is a 1-1 relation between msgid and symbol. """ def __init__(self) -> None: self.__msgid_to_symbol: dict[str, str] = {} self.__symbol_to_msgid: dict[str, str] = {} self.__old_names: dict[str, list[str]] = {} self.__active_msgids: dict[str, list[str]] = {} def __len__(self) -> int: return len(self.__msgid_to_symbol) def __repr__(self) -> str: result = "MessageIdStore: [\n" for msgid, symbol in self.__msgid_to_symbol.items(): result += f" - {msgid} ({symbol})\n" result += "]" return result def get_symbol(self, msgid: str) -> str: try: return self.__msgid_to_symbol[msgid.upper()] except KeyError as e: msg = f"'{msgid}' is not stored in the message store." raise UnknownMessageError(msg) from e def get_msgid(self, symbol: str) -> str: try: return self.__symbol_to_msgid[symbol] except KeyError as e: msg = f"'{symbol}' is not stored in the message store." raise UnknownMessageError(msg) from e def register_message_definition( self, msgid: str, symbol: str, old_names: list[tuple[str, str]] ) -> None: self.check_msgid_and_symbol(msgid, symbol) self.add_msgid_and_symbol(msgid, symbol) for old_msgid, old_symbol in old_names: self.check_msgid_and_symbol(old_msgid, old_symbol) self.add_legacy_msgid_and_symbol(old_msgid, old_symbol, msgid) def add_msgid_and_symbol(self, msgid: str, symbol: str) -> None: """Add valid message id. There is a little duplication with add_legacy_msgid_and_symbol to avoid a function call, this is called a lot at initialization. """ self.__msgid_to_symbol[msgid] = symbol self.__symbol_to_msgid[symbol] = msgid def add_legacy_msgid_and_symbol( self, msgid: str, symbol: str, new_msgid: str ) -> None: """Add valid legacy message id. There is a little duplication with add_msgid_and_symbol to avoid a function call, this is called a lot at initialization. 
""" self.__msgid_to_symbol[msgid] = symbol self.__symbol_to_msgid[symbol] = msgid existing_old_names = self.__old_names.get(msgid, []) existing_old_names.append(new_msgid) self.__old_names[msgid] = existing_old_names def check_msgid_and_symbol(self, msgid: str, symbol: str) -> None: existing_msgid: str | None = self.__symbol_to_msgid.get(symbol) existing_symbol: str | None = self.__msgid_to_symbol.get(msgid) if existing_symbol is None and existing_msgid is None: return # both symbol and msgid are usable if existing_msgid is not None: if existing_msgid != msgid: self._raise_duplicate_msgid(symbol, msgid, existing_msgid) if existing_symbol and existing_symbol != symbol: # See https://github.com/python/mypy/issues/10559 self._raise_duplicate_symbol(msgid, symbol, existing_symbol) @staticmethod def _raise_duplicate_symbol(msgid: str, symbol: str, other_symbol: str) -> NoReturn: """Raise an error when a symbol is duplicated.""" symbol_a, symbol_b = sorted([symbol, other_symbol]) raise InvalidMessageError( f"Message id '{msgid}' cannot have both " f"'{symbol_a}' and '{symbol_b}' as symbolic name." ) @staticmethod def _raise_duplicate_msgid(symbol: str, msgid: str, other_msgid: str) -> NoReturn: """Raise an error when a msgid is duplicated.""" msgid_a, msgid_b = sorted([msgid, other_msgid]) raise InvalidMessageError( f"Message symbol '{symbol}' cannot be used for " f"'{msgid_a}' and '{msgid_b}' at the same time." f" If you're creating an 'old_names' use 'old-{symbol}' as the old symbol." ) def get_active_msgids(self, msgid_or_symbol: str) -> list[str]: """Return msgids but the input can be a symbol. self.__active_msgids is used to implement a primitive cache for this function. 
""" try: return self.__active_msgids[msgid_or_symbol] except KeyError: pass # If we don't have a cached value yet we compute it msgid: str | None deletion_reason = None moved_reason = None if msgid_or_symbol[1:].isdigit(): # Only msgid can have a digit as second letter msgid = msgid_or_symbol.upper() symbol = self.__msgid_to_symbol.get(msgid) if not symbol: deletion_reason = is_deleted_msgid(msgid) if deletion_reason is None: moved_reason = is_moved_msgid(msgid) else: symbol = msgid_or_symbol msgid = self.__symbol_to_msgid.get(msgid_or_symbol) if not msgid: deletion_reason = is_deleted_symbol(symbol) if deletion_reason is None: moved_reason = is_moved_symbol(symbol) if not (msgid and symbol): if deletion_reason is not None: raise DeletedMessageError(msgid_or_symbol, deletion_reason) if moved_reason is not None: raise MessageBecameExtensionError(msgid_or_symbol, moved_reason) error_msg = f"No such message id or symbol '{msgid_or_symbol}'." raise UnknownMessageError(error_msg) ids = self.__old_names.get(msgid, [msgid]) # Add to cache self.__active_msgids[msgid_or_symbol] = ids return ids
MessageIdStore
python
pytorch__pytorch
test/test_dataloader.py
{ "start": 11654, "end": 11855 }
class ____(Dataset): def __init__(self, n): super().__init__() self.n = n def __getitem__(self, i): return i def __len__(self): return self.n
CountingDataset
python
pandas-dev__pandas
pandas/tests/scalar/timestamp/methods/test_timestamp_method.py
{ "start": 296, "end": 1151 }
class ____: @td.skip_if_windows @pytest.mark.skipif(WASM, reason="tzset is not available on WASM") def test_timestamp(self, fixed_now_ts): # GH#17329 # tz-naive --> treat it as if it were UTC for purposes of timestamp() ts = fixed_now_ts uts = ts.replace(tzinfo=timezone.utc) assert ts.timestamp() == uts.timestamp() tsc = Timestamp("2014-10-11 11:00:01.12345678", tz="US/Central") utsc = tsc.tz_convert("UTC") # utsc is a different representation of the same time assert tsc.timestamp() == utsc.timestamp() # datetime.timestamp() converts in the local timezone with tm.set_timezone("UTC"): # should agree with datetime.timestamp method dt = ts.to_pydatetime() assert dt.timestamp() == ts.timestamp()
TestTimestampMethod
python
pytorch__pytorch
test/onnx/ops/test_ops.py
{ "start": 15957, "end": 58484 }
class ____(common_utils.TestCase): def export(self, model, args=(), kwargs=None, **options) -> torch.onnx.ONNXProgram: onnx_program = torch.onnx.export( model, args, kwargs=kwargs, dynamo=True, fallback=False, verbose=False, **options, ) assert onnx_program is not None common_passes.CheckerPass()(onnx_program.model) return onnx_program def test_onnx_ops_can_be_decomposed_to_aten(self): input_data = torch.rand(2, 3, 4, 8) position_ids_data = torch.randint(0, 50, (2, 4)).long() sin_cache_data = torch.rand(50, 4) cos_cache_data = torch.rand(50, 4) class Model(torch.nn.Module): def forward( self, input_data, cos_cache_data, sin_cache_data, position_ids_data ): return torch.onnx.ops.rotary_embedding( input_data, cos_cache_data, sin_cache_data, position_ids_data, interleaved=True, ) model = Model() ep = torch.export.export( model, (input_data, cos_cache_data, sin_cache_data, position_ids_data), ) self.assertIn( "onnx.RotaryEmbedding.opset23", [str(node.target) for node in ep.graph.nodes], ) # The program can be decomposed into aten ops so it is fully compatible with the PyTorch ecosystem aten_decomped = ep.run_decompositions(torch.onnx.ops.aten_decompositions()) self.assertNotIn( "onnx.RotaryEmbedding.opset23", [str(node.target) for node in aten_decomped.graph.nodes], ) torch.testing.assert_close( aten_decomped.module()( input_data, cos_cache_data, sin_cache_data, position_ids_data ), model(input_data, cos_cache_data, sin_cache_data, position_ids_data), ) def test_rotary_embedding_opcheck(self): input_data = torch.rand(2, 3, 4, 8) position_ids_data = torch.randint(0, 50, (2, 4)).long() sin_cache_data = torch.rand(50, 4) cos_cache_data = torch.rand(50, 4) torch.library.opcheck( _impl.rotary_embedding_23, (input_data, cos_cache_data, sin_cache_data, position_ids_data), ) def test_rotary_embedding(self): input_data = torch.rand(2, 3, 4, 8) position_ids_data = torch.randint(0, 50, (2, 4)).long() sin_cache_data = torch.rand(50, 4) cos_cache_data = torch.rand(50, 4) # Eager 
mode is supported. Autograd is also supported so users can choose to use the op # in development and production result = torch.onnx.ops.rotary_embedding( input_data, cos_cache_data, sin_cache_data, position_ids_data ) self.assertEqual(result.shape, input_data.shape) class Model(torch.nn.Module): def forward( self, input_data, cos_cache_data, sin_cache_data, position_ids_data ): return torch.onnx.ops.rotary_embedding( input_data, cos_cache_data, sin_cache_data, position_ids_data, interleaved=True, ) model = Model() # Dynamic shapes are supported dynamic_shapes = { "input_data": {0: torch.export.Dim.DYNAMIC}, "cos_cache_data": None, "sin_cache_data": None, "position_ids_data": {0: torch.export.Dim.DYNAMIC}, } onnx_program = self.export( model, (input_data, cos_cache_data, sin_cache_data, position_ids_data), dynamic_shapes=dynamic_shapes, opset_version=23, ) self.assertEqual(onnx_program.model.opset_imports[""], 23) self.assertEqual("RotaryEmbedding", onnx_program.model.graph.node(0).op_type) onnx_testing.assert_onnx_program(onnx_program) def test_rotary_embedding_3d(self): num_heads = 2 input_data = torch.rand(2, 3, 8) sin_cache_data = torch.rand(2, 3, 2) cos_cache_data = torch.rand(2, 3, 2) class Model(torch.nn.Module): def forward(self, input_data, cos_cache_data, sin_cache_data): return torch.onnx.ops.rotary_embedding( input_data, cos_cache_data, sin_cache_data, num_heads=num_heads, ) model = Model() # Dynamic shapes are supported dynamic_shapes = { "input_data": {0: torch.export.Dim.DYNAMIC}, "cos_cache_data": {0: torch.export.Dim.DYNAMIC}, "sin_cache_data": {0: torch.export.Dim.DYNAMIC}, } onnx_program = self.export( model, (input_data, cos_cache_data, sin_cache_data), dynamic_shapes=dynamic_shapes, opset_version=23, ) self.assertEqual(onnx_program.model.opset_imports[""], 23) self.assertEqual("RotaryEmbedding", onnx_program.model.graph.node(0).op_type) onnx_testing.assert_onnx_program(onnx_program) def test_attention_without_past_kv_caches(self): """Test basic 
attention functionality.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) # Test eager mode torch.library.opcheck(_impl.attention_23, (Q, K, V)) output, present_key, present_value, qk_output = torch.onnx.ops.attention( Q, K, V ) self.assertEqual(output.shape, (batch_size, q_num_heads, q_seq_len, head_size)) self.assertEqual(present_key.shape, K.shape) self.assertEqual(present_value.shape, V.shape) self.assertEqual( qk_output.shape, (batch_size, q_num_heads, q_seq_len, kv_seq_len) ) def test_attention_3d_inputs(self): """Test attention with 3D inputs (requires num_heads parameters).""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_seq_len, q_num_heads * head_size) K = torch.rand(batch_size, kv_seq_len, kv_num_heads * head_size) V = torch.rand(batch_size, kv_seq_len, kv_num_heads * head_size) torch.library.opcheck( _impl.attention_23, (Q, K, V), dict(q_num_heads=q_num_heads, kv_num_heads=kv_num_heads), ) output, present_key, present_value, qk_output = torch.onnx.ops.attention( Q, K, V, q_num_heads=q_num_heads, kv_num_heads=kv_num_heads ) # Output should be reshaped back to 3D self.assertEqual(output.shape, (batch_size, q_seq_len, q_num_heads * head_size)) self.assertEqual( present_key.shape, (batch_size, kv_num_heads, kv_seq_len, head_size) ) self.assertEqual( present_value.shape, (batch_size, kv_num_heads, kv_seq_len, head_size) ) @common_utils.parametrize( "name, kv_num_heads", [ ("group_query_attention", 4), ("multi_query_attention", 1), ], ) def test_attention_kv_num_heads(self, name: str, kv_num_heads: int): batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads = 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = 
torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) torch.library.opcheck(_impl.attention_23, (Q, K, V)) output, present_key, present_value, qk_output = torch.onnx.ops.attention( Q, K, V ) expected = torch.nn.functional.scaled_dot_product_attention( Q, K, V, None, enable_gqa=True ) self.assertEqual(output.shape, (batch_size, q_num_heads, q_seq_len, head_size)) self.assertEqual(present_key.shape, K.shape) self.assertEqual(present_value.shape, V.shape) torch.testing.assert_close(output, expected) def test_attention_mqa(self): """Test Multi-Query Attention (MQA).""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 1 # MQA: kv_num_heads = 1 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) torch.library.opcheck(_impl.attention_23, (Q, K, V)) output, present_key, present_value, qk_output = torch.onnx.ops.attention( Q, K, V ) expected = torch.nn.functional.scaled_dot_product_attention( Q, K, V, None, enable_gqa=True ) self.assertEqual(output.shape, (batch_size, q_num_heads, q_seq_len, head_size)) torch.testing.assert_close(output, expected) def test_attention_with_2d_mask(self): """Test attention with 2D attention mask (q_seq_len, kv_seq_len).""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) # Test with boolean mask bool_mask = torch.randint(0, 2, (q_seq_len, kv_seq_len), dtype=torch.bool) torch.library.opcheck(_impl.attention_23, (Q, K, V), dict(attn_mask=bool_mask)) output_bool, _, _, _ = torch.onnx.ops.attention(Q, K, V, attn_mask=bool_mask) # Test with float mask float_mask = 
torch.randn(q_seq_len, kv_seq_len) torch.library.opcheck(_impl.attention_23, (Q, K, V), dict(attn_mask=float_mask)) output_float, _, _, _ = torch.onnx.ops.attention(Q, K, V, attn_mask=float_mask) self.assertEqual( output_bool.shape, (batch_size, q_num_heads, q_seq_len, head_size) ) self.assertEqual( output_float.shape, (batch_size, q_num_heads, q_seq_len, head_size) ) def test_attention_with_4d_mask(self): """Test attention with 4D attention mask (batch_size, num_heads, q_seq_len, kv_seq_len).""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) # Test with boolean mask bool_mask = torch.randint( 0, 2, (batch_size, q_num_heads, q_seq_len, kv_seq_len), dtype=torch.bool ) torch.library.opcheck(_impl.attention_23, (Q, K, V), dict(attn_mask=bool_mask)) output_bool, _, _, _ = torch.onnx.ops.attention(Q, K, V, attn_mask=bool_mask) # Test with float mask float_mask = torch.randn(batch_size, q_num_heads, q_seq_len, kv_seq_len) torch.library.opcheck(_impl.attention_23, (Q, K, V), dict(attn_mask=float_mask)) output_float, _, _, _ = torch.onnx.ops.attention(Q, K, V, attn_mask=float_mask) self.assertEqual( output_bool.shape, (batch_size, q_num_heads, q_seq_len, head_size) ) self.assertEqual( output_float.shape, (batch_size, q_num_heads, q_seq_len, head_size) ) def test_attention_with_zero_float_mask(self): """Test attention with zero float mask.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) zero_mask = torch.zeros(q_seq_len, kv_seq_len) torch.library.opcheck(_impl.attention_23, (Q, K, V), dict(attn_mask=zero_mask)) 
output, _, _, _ = torch.onnx.ops.attention(Q, K, V, attn_mask=zero_mask) self.assertEqual(output.shape, (batch_size, q_num_heads, q_seq_len, head_size)) def test_attention_with_causal_mask_pattern(self): """Test attention with lower triangular causal mask pattern.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 4 # Square for causal q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) # Create a lower triangular causal mask causal_mask = torch.tril(torch.ones(q_seq_len, kv_seq_len, dtype=torch.bool)) torch.library.opcheck( _impl.attention_23, (Q, K, V), dict(attn_mask=causal_mask) ) output, _, _, _ = torch.onnx.ops.attention(Q, K, V, attn_mask=causal_mask) self.assertEqual(output.shape, (batch_size, q_num_heads, q_seq_len, head_size)) def test_attention_with_gqa_and_mask(self): """Test attention with GQA and different mask shapes.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 4 # GQA head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) # Test 2D mask with GQA mask_2d = torch.randint(0, 2, (q_seq_len, kv_seq_len), dtype=torch.bool) torch.library.opcheck(_impl.attention_23, (Q, K, V), dict(attn_mask=mask_2d)) output_2d, _, _, _ = torch.onnx.ops.attention(Q, K, V, attn_mask=mask_2d) # Test 4D mask with GQA (note: using q_num_heads for mask heads) mask_4d = torch.randint( 0, 2, (batch_size, q_num_heads, q_seq_len, kv_seq_len), dtype=torch.bool ) torch.library.opcheck(_impl.attention_23, (Q, K, V), dict(attn_mask=mask_4d)) output_4d, _, _, _ = torch.onnx.ops.attention(Q, K, V, attn_mask=mask_4d) self.assertEqual( output_2d.shape, (batch_size, q_num_heads, q_seq_len, head_size) ) self.assertEqual( 
output_4d.shape, (batch_size, q_num_heads, q_seq_len, head_size) ) def test_attention_causal(self): """Test causal attention.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 4 # Square for causal q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) torch.library.opcheck(_impl.attention_23, (Q, K, V), dict(is_causal=True)) output, _, _, _ = torch.onnx.ops.attention(Q, K, V, is_causal=True) self.assertEqual(output.shape, (batch_size, q_num_heads, q_seq_len, head_size)) def test_attention_with_past_kv(self): """Test attention with past key/value caches.""" batch_size, q_seq_len, kv_seq_len, past_seq_len = 2, 4, 6, 3 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) past_key = torch.rand(batch_size, kv_num_heads, past_seq_len, head_size) past_value = torch.rand(batch_size, kv_num_heads, past_seq_len, head_size) torch.library.opcheck( _impl.attention_23, (Q, K, V), dict(past_key=past_key, past_value=past_value), ) output, present_key, present_value, _ = torch.onnx.ops.attention( Q, K, V, past_key=past_key, past_value=past_value ) # Present key/value should include past + current expected_total_seq_len = past_seq_len + kv_seq_len self.assertEqual( present_key.shape, (batch_size, kv_num_heads, expected_total_seq_len, head_size), ) self.assertEqual( present_value.shape, (batch_size, kv_num_heads, expected_total_seq_len, head_size), ) def test_attention_with_softcap(self): """Test attention with softcap.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, 
head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) torch.library.opcheck(_impl.attention_23, (Q, K, V), dict(softcap=30.0)) output, _, _, _ = torch.onnx.ops.attention(Q, K, V, softcap=30.0) self.assertEqual(output.shape, (batch_size, q_num_heads, q_seq_len, head_size)) def test_attention_qk_output_modes(self): """Test different QK matmul output modes.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) for mode in [0, 1, 2, 3]: torch.library.opcheck( _impl.attention_23, (Q, K, V), dict(qk_matmul_output_mode=mode), ) output, _, _, qk_output = torch.onnx.ops.attention( Q, K, V, qk_matmul_output_mode=mode ) self.assertEqual( output.shape, (batch_size, q_num_heads, q_seq_len, head_size) ) self.assertEqual( qk_output.shape, (batch_size, q_num_heads, q_seq_len, kv_seq_len) ) def test_attention_custom_scale(self): """Test attention with custom scale factor.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) custom_scale = 0.25 torch.library.opcheck(_impl.attention_23, (Q, K, V), dict(scale=custom_scale)) output, _, _, _ = torch.onnx.ops.attention(Q, K, V, scale=custom_scale) self.assertEqual(output.shape, (batch_size, q_num_heads, q_seq_len, head_size)) def test_attention_export(self): """Test that attention can be exported to ONNX.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, 
kv_seq_len, head_size) class AttentionModel(torch.nn.Module): def forward(self, Q, K, V): output, _, _, _ = torch.onnx.ops.attention(Q, K, V) return output model = AttentionModel() onnx_program = self.export( model, (Q, K, V), opset_version=23, ) self.assertEqual(onnx_program.model.opset_imports[""], 23) self.assertEqual("Attention", onnx_program.model.graph.node(0).op_type) onnx_testing.assert_onnx_program(onnx_program) def test_attention_export_with_dynamic_shapes(self): """Test attention export with dynamic shapes.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) attn_mask = torch.randint( 0, 2, (batch_size, 1, q_seq_len, kv_seq_len), dtype=torch.bool ) class AttentionModel(torch.nn.Module): def forward(self, Q, K, V, attn_mask): output, _, _, _ = torch.onnx.ops.attention(Q, K, V, attn_mask=attn_mask) return output model = AttentionModel() dynamic_shapes = { "Q": {0: "batch", 2: "q_seq_len"}, "K": {0: "batch", 2: "kv_seq_len"}, "V": {0: "batch", 2: "kv_seq_len"}, "attn_mask": {0: "batch", 2: "q_seq_len", 3: "kv_seq_len"}, } onnx_program = self.export( model, (Q, K, V, attn_mask), dynamic_shapes=dynamic_shapes, opset_version=23, ) self.assertEqual(onnx_program.model.opset_imports[""], 23) self.assertEqual("Attention", onnx_program.model.graph.node(0).op_type) node = onnx_program.model.graph.node(0) # Verify inputs self.assertEqual(len(node.inputs), 4) self.assertEqual( node.inputs[0].shape, ["batch", q_num_heads, "q_seq_len", head_size] ) self.assertEqual( node.inputs[1].shape, ["batch", kv_num_heads, "kv_seq_len", head_size] ) self.assertEqual( node.inputs[2].shape, ["batch", kv_num_heads, "kv_seq_len", head_size] ) # Verify default attributes (should be minimal) self.assertEqual(len(node.attributes), 0) 
onnx_testing.assert_onnx_program(onnx_program) def test_attention_3d_export(self): """Test attention export with 3D inputs.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_seq_len, q_num_heads * head_size) K = torch.rand(batch_size, kv_seq_len, kv_num_heads * head_size) V = torch.rand(batch_size, kv_seq_len, kv_num_heads * head_size) class AttentionModel(torch.nn.Module): def forward(self, Q, K, V): output, _, _, _ = torch.onnx.ops.attention( Q, K, V, q_num_heads=q_num_heads, kv_num_heads=kv_num_heads ) return output model = AttentionModel() onnx_program = self.export( model, (Q, K, V), opset_version=23, ) self.assertEqual(onnx_program.model.opset_imports[""], 23) self.assertEqual("Attention", onnx_program.model.graph.node(0).op_type) onnx_testing.assert_onnx_program(onnx_program) def test_attention_decomposition(self): """Test that attention can be decomposed to aten ops.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) class AttentionModel(torch.nn.Module): def forward(self, Q, K, V): output, present_key, present_value, qk_output = ( torch.onnx.ops.attention(Q, K, V) ) return output model = AttentionModel() ep = torch.export.export(model, (Q, K, V)) self.assertIn( "onnx.Attention.opset23", [str(node.target) for node in ep.graph.nodes], ) # The program can be decomposed into aten ops aten_decomped = ep.run_decompositions(torch.onnx.ops.aten_decompositions()) self.assertNotIn( "onnx.Attention.opset23", [str(node.target) for node in aten_decomped.graph.nodes], ) # Results should match torch.testing.assert_close( aten_decomped.module()(Q, K, V), model(Q, K, V), ) def test_attention_export_with_past_key_value(self): """Test export with past_key, past_value to 
ensure the optional input order is correct.""" batch_size, q_seq_len, kv_seq_len, past_seq_len = 2, 4, 6, 3 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) past_key = torch.rand(batch_size, kv_num_heads, past_seq_len, head_size) past_value = torch.rand(batch_size, kv_num_heads, past_seq_len, head_size) class Model(torch.nn.Module): def forward(self, Q, K, V, past_key, past_value): output, present_key, present_value, _ = torch.onnx.ops.attention( Q, K, V, past_key=past_key, attn_mask=None, # Switched argument order past_value=past_value, ) return output, present_key, present_value model = Model() onnx_program = self.export( model, (Q, K, V, past_key, past_value), opset_version=23 ) node = onnx_program.model.graph.node(0) self.assertEqual(node.op_type, "Attention") # Verify all 6 inputs are present self.assertEqual( len(node.inputs), 6 ) # Q, K, V, attn_mask, past_key, past_value self.assertEqual( node.inputs[0].shape, [batch_size, q_num_heads, q_seq_len, head_size] ) self.assertEqual( node.inputs[1].shape, [batch_size, kv_num_heads, kv_seq_len, head_size] ) self.assertEqual( node.inputs[2].shape, [batch_size, kv_num_heads, kv_seq_len, head_size] ) self.assertIsNone(node.inputs[3]) self.assertEqual( node.inputs[4].shape, [batch_size, kv_num_heads, past_seq_len, head_size] ) self.assertEqual( node.inputs[5].shape, [batch_size, kv_num_heads, past_seq_len, head_size] ) onnx_testing.assert_onnx_program(onnx_program) def test_attention_export_with_all_optional_inputs(self): """Test export with all optional inputs: mask, past_key, past_value.""" batch_size, q_seq_len, kv_seq_len, past_seq_len = 2, 4, 6, 3 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = 
torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) attn_mask = torch.randint( 0, 2, (1, 1, q_seq_len, kv_seq_len + past_seq_len), dtype=torch.bool ) past_key = torch.rand(batch_size, kv_num_heads, past_seq_len, head_size) past_value = torch.rand(batch_size, kv_num_heads, past_seq_len, head_size) class FullAttentionModel(torch.nn.Module): def forward(self, Q, K, V, attn_mask, past_key, past_value): output, present_key, present_value, qk_matmul = ( torch.onnx.ops.attention( Q, K, V, attn_mask=attn_mask, past_key=past_key, past_value=past_value, ) ) return output, present_key, present_value, qk_matmul model = FullAttentionModel() onnx_program = self.export( model, (Q, K, V, attn_mask, past_key, past_value), opset_version=23 ) node = onnx_program.model.graph.node(0) self.assertEqual(node.op_type, "Attention") # Verify all 6 inputs are present self.assertEqual( len(node.inputs), 6 ) # Q, K, V, attn_mask, past_key, past_value self.assertEqual( node.inputs[0].shape, [batch_size, q_num_heads, q_seq_len, head_size] ) self.assertEqual( node.inputs[1].shape, [batch_size, kv_num_heads, kv_seq_len, head_size] ) self.assertEqual( node.inputs[2].shape, [batch_size, kv_num_heads, kv_seq_len, head_size] ) self.assertEqual( node.inputs[3].shape, [1, 1, q_seq_len, kv_seq_len + past_seq_len] ) self.assertEqual( node.inputs[4].shape, [batch_size, kv_num_heads, past_seq_len, head_size] ) self.assertEqual( node.inputs[5].shape, [batch_size, kv_num_heads, past_seq_len, head_size] ) onnx_testing.assert_onnx_program(onnx_program) def test_attention_export_3d_with_num_heads_attributes(self): """Test export with 3D inputs and explicit num_heads attributes.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 4 # GQA head_size = 64 Q = torch.rand(batch_size, q_seq_len, q_num_heads * head_size) K = torch.rand(batch_size, kv_seq_len, kv_num_heads * head_size) V = torch.rand(batch_size, kv_seq_len, kv_num_heads * head_size) class 
Attention3DModel(torch.nn.Module): def forward(self, Q, K, V): output, _, _, _ = torch.onnx.ops.attention( Q, K, V, q_num_heads=q_num_heads, kv_num_heads=kv_num_heads ) return output model = Attention3DModel() onnx_program = self.export(model, (Q, K, V), opset_version=23) node = onnx_program.model.graph.node(0) self.assertEqual(node.op_type, "Attention") # Verify 3D input shapes self.assertEqual( node.inputs[0].shape, [batch_size, q_seq_len, q_num_heads * head_size] ) self.assertEqual( node.inputs[1].shape, [batch_size, kv_seq_len, kv_num_heads * head_size] ) self.assertEqual( node.inputs[2].shape, [batch_size, kv_seq_len, kv_num_heads * head_size] ) # Verify num_heads attributes are set attrs = node.attributes self.assertIn("q_num_heads", attrs) self.assertIn("kv_num_heads", attrs) self.assertEqual(attrs["q_num_heads"].value, q_num_heads) self.assertEqual(attrs["kv_num_heads"].value, kv_num_heads) onnx_testing.assert_onnx_program(onnx_program) def test_attention_export_with_all_attributes(self): """Test export with all possible attributes set.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) class FullAttributesModel(torch.nn.Module): def forward(self, Q, K, V): output, _, _, _ = torch.onnx.ops.attention( Q, K, V, is_causal=True, qk_matmul_output_mode=2, scale=0.25, softcap=30.0, softmax_precision=1, # FLOAT ) return output model = FullAttributesModel() onnx_program = self.export(model, (Q, K, V), opset_version=23) node = onnx_program.model.graph.node(0) self.assertEqual(node.op_type, "Attention") # Verify all attributes are set correctly attrs = node.attributes self.assertIn("is_causal", attrs) self.assertIn("qk_matmul_output_mode", attrs) self.assertIn("scale", attrs) self.assertIn("softcap", attrs) 
self.assertIn("softmax_precision", attrs) self.assertEqual(attrs["is_causal"].value, 1) # True as int self.assertEqual(attrs["qk_matmul_output_mode"].value, 2) self.assertAlmostEqual(attrs["scale"].value, 0.25, places=6) self.assertAlmostEqual(attrs["softcap"].value, 30.0, places=6) self.assertEqual(attrs["softmax_precision"].value, 1) onnx_testing.assert_onnx_program(onnx_program) def test_attention_export_with_different_mask_shapes(self): """Test export with different attention mask shapes.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) # Test 2D mask mask_2d = torch.randint(0, 2, (q_seq_len, kv_seq_len), dtype=torch.bool) class Mask2DModel(torch.nn.Module): def forward(self, Q, K, V, mask): output, _, _, _ = torch.onnx.ops.attention(Q, K, V, attn_mask=mask) return output model_2d = Mask2DModel() onnx_program_2d = self.export(model_2d, (Q, K, V, mask_2d), opset_version=23) node_2d = onnx_program_2d.model.graph.node(0) self.assertEqual(node_2d.inputs[3].shape, [q_seq_len, kv_seq_len]) onnx_testing.assert_onnx_program(onnx_program_2d) # Test 3D mask mask_3d = torch.randint( 0, 2, (batch_size, 1, q_seq_len, kv_seq_len), dtype=torch.bool ) class Mask3DModel(torch.nn.Module): def forward(self, Q, K, V, mask): output, _, _, _ = torch.onnx.ops.attention(Q, K, V, attn_mask=mask) return output model_3d = Mask3DModel() onnx_program_3d = self.export(model_3d, (Q, K, V, mask_3d), opset_version=23) node_3d = onnx_program_3d.model.graph.node(0) self.assertEqual( node_3d.inputs[3].shape, [batch_size, 1, q_seq_len, kv_seq_len] ) onnx_testing.assert_onnx_program(onnx_program_3d) # Test 4D mask mask_4d = torch.randint( 0, 2, (batch_size, q_num_heads, q_seq_len, kv_seq_len), dtype=torch.bool ) class Mask4DModel(torch.nn.Module): def forward(self, 
Q, K, V, mask): output, _, _, _ = torch.onnx.ops.attention(Q, K, V, attn_mask=mask) return output model_4d = Mask4DModel() onnx_program_4d = self.export(model_4d, (Q, K, V, mask_4d), opset_version=23) node_4d = onnx_program_4d.model.graph.node(0) self.assertEqual( node_4d.inputs[3].shape, [batch_size, q_num_heads, q_seq_len, kv_seq_len] ) onnx_testing.assert_onnx_program(onnx_program_4d) def test_attention_export_with_float_mask(self): """Test export with float attention mask.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) float_mask = torch.randn(q_seq_len, kv_seq_len) class FloatMaskModel(torch.nn.Module): def forward(self, Q, K, V, mask): output, _, _, _ = torch.onnx.ops.attention(Q, K, V, attn_mask=mask) return output model = FloatMaskModel() onnx_program = self.export(model, (Q, K, V, float_mask), opset_version=23) node = onnx_program.model.graph.node(0) self.assertEqual(node.op_type, "Attention") self.assertEqual(node.inputs[3].shape, [q_seq_len, kv_seq_len]) # Verify the mask input has float dtype in the ONNX model self.assertEqual(node.inputs[3].dtype, ir.DataType.FLOAT) onnx_testing.assert_onnx_program(onnx_program) def test_attention_export_qk_output_modes(self): """Test export with different QK output modes.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) for mode in [0, 1, 2, 3]: class QKOutputModel(torch.nn.Module): def __init__(self, qk_mode): super().__init__() self.qk_mode = qk_mode def forward(self, Q, K, V): output, _, _, qk_output = torch.onnx.ops.attention( Q, K, V, 
qk_matmul_output_mode=self.qk_mode ) return output, qk_output model = QKOutputModel(mode) onnx_program = self.export(model, (Q, K, V), opset_version=23) node = onnx_program.model.graph.node(0) self.assertEqual(node.op_type, "Attention") # Verify qk_matmul_output_mode attribute attrs = node.attributes if mode != 0: self.assertIn("qk_matmul_output_mode", attrs) self.assertEqual(attrs["qk_matmul_output_mode"].value, mode) # Verify 4 outputs (output, present_key, present_value, qk_output) self.assertEqual(len(node.outputs), 4) onnx_testing.assert_onnx_program(onnx_program) def test_attention_export_mqa(self): """Test export with Multi-Query Attention (MQA).""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 1 # MQA head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) class MQAModel(torch.nn.Module): def forward(self, Q, K, V): output, _, _, _ = torch.onnx.ops.attention(Q, K, V) return output model = MQAModel() onnx_program = self.export(model, (Q, K, V), opset_version=23) node = onnx_program.model.graph.node(0) self.assertEqual(node.op_type, "Attention") # Verify MQA tensor shapes self.assertEqual( node.inputs[0].shape, [batch_size, q_num_heads, q_seq_len, head_size] ) self.assertEqual( node.inputs[1].shape, [batch_size, kv_num_heads, kv_seq_len, head_size] ) # kv_num_heads = 1 self.assertEqual( node.inputs[2].shape, [batch_size, kv_num_heads, kv_seq_len, head_size] ) onnx_testing.assert_onnx_program(onnx_program) @common_utils.parametrize( "precision_enum, precision_name", [ (1, "FLOAT"), (10, "FLOAT16"), (11, "DOUBLE"), (16, "BFLOAT16"), ], ) def test_attention_export_with_softmax_precision( self, precision_enum, precision_name: str ): """Test export with different softmax precision values.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 8 head_size = 64 Q = 
torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) class SoftmaxPrecisionModel(torch.nn.Module): def __init__(self, precision): super().__init__() self.precision = precision def forward(self, Q, K, V): output, _, _, _ = torch.onnx.ops.attention( Q, K, V, softmax_precision=self.precision ) return output model = SoftmaxPrecisionModel(precision_enum) onnx_program = self.export(model, (Q, K, V), opset_version=23) node = onnx_program.model.graph.node(0) self.assertEqual(node.op_type, "Attention") # Verify softmax_precision attribute attrs = node.attributes self.assertIn("softmax_precision", attrs) self.assertEqual(attrs["softmax_precision"].value, precision_enum) onnx_testing.assert_onnx_program(onnx_program, atol=2e-3, rtol=6e-3) def test_attention_export_gqa(self): """Test export and verify output tensor shapes.""" batch_size, q_seq_len, kv_seq_len = 2, 4, 6 q_num_heads, kv_num_heads = 8, 4 # GQA head_size = 64 Q = torch.rand(batch_size, q_num_heads, q_seq_len, head_size) K = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) V = torch.rand(batch_size, kv_num_heads, kv_seq_len, head_size) class AttentionOutputsModel(torch.nn.Module): def forward(self, Q, K, V): result, _, _, _ = torch.onnx.ops.attention(Q, K, V) return result model = AttentionOutputsModel() onnx_program = self.export(model, (Q, K, V), opset_version=23) node = onnx_program.model.graph.node(0) self.assertEqual(node.op_type, "Attention") # Verify all 4 outputs have correct shapes outputs = node.outputs self.assertEqual(len(outputs), 4) # output: (batch_size, q_num_heads, q_seq_len, head_size) self.assertEqual( outputs[0].shape, [batch_size, q_num_heads, q_seq_len, head_size] ) onnx_testing.assert_onnx_program(onnx_program) if __name__ == "__main__": common_utils.run_tests()
NativeOnnxOpsTest
python
MongoEngine__mongoengine
tests/queryset/test_geo.py
{ "start": 157, "end": 22656 }
class ____(MongoDBTestCase): def _create_event_data(self, point_field_class=GeoPointField): """Create some sample data re-used in many of the tests below.""" class Event(Document): title = StringField() date = DateTimeField() location = point_field_class() def __unicode__(self): return self.title self.Event = Event Event.drop_collection() event1 = Event.objects.create( title="Coltrane Motion @ Double Door", date=datetime.datetime.now() - datetime.timedelta(days=1), location=[-87.677137, 41.909889], ) event2 = Event.objects.create( title="Coltrane Motion @ Bottom of the Hill", date=datetime.datetime.now() - datetime.timedelta(days=10), location=[-122.4194155, 37.7749295], ) event3 = Event.objects.create( title="Coltrane Motion @ Empty Bottle", date=datetime.datetime.now(), location=[-87.686638, 41.900474], ) return event1, event2, event3 def test_near(self): """Make sure the "near" operator works.""" event1, event2, event3 = self._create_event_data() # find all events "near" pitchfork office, chicago. # note that "near" will show the san francisco event, too, # although it sorts to last. events = self.Event.objects(location__near=[-87.67892, 41.9120459]) if PYMONGO_VERSION < (4,): assert events.count() == 3 assert list(events) == [event1, event3, event2] # ensure ordering is respected by "near" events = self.Event.objects(location__near=[-87.67892, 41.9120459]) events = events.order_by("-date") if PYMONGO_VERSION < (4,): assert events.count() == 3 assert list(events) == [event3, event1, event2] def test_near_and_max_distance(self): """Ensure the "max_distance" operator works alongside the "near" operator. 
""" event1, event2, event3 = self._create_event_data() # find events within 10 degrees of san francisco point = [-122.415579, 37.7566023] events = self.Event.objects(location__near=point, location__max_distance=10) if PYMONGO_VERSION < (4,): assert events.count() == 1 assert list(events) == [event2] def test_near_and_min_distance(self): """Ensure the "min_distance" operator works alongside the "near" operator. """ event1, event2, event3 = self._create_event_data() # find events at least 10 degrees away of san francisco point = [-122.415579, 37.7566023] events = self.Event.objects(location__near=point, location__min_distance=10) if PYMONGO_VERSION < (4,): assert events.count() == 2 assert list(events) == [event3, event1] def test_within_distance(self): """Make sure the "within_distance" operator works.""" event1, event2, event3 = self._create_event_data() # find events within 5 degrees of pitchfork office, chicago point_and_distance = [[-87.67892, 41.9120459], 5] events = self.Event.objects(location__within_distance=point_and_distance) assert events.count() == 2 events = list(events) assert event2 not in events assert event1 in events assert event3 in events # find events within 10 degrees of san francisco point_and_distance = [[-122.415579, 37.7566023], 10] events = self.Event.objects(location__within_distance=point_and_distance) assert events.count() == 1 assert events[0] == event2 # find events within 1 degree of greenpoint, broolyn, nyc, ny point_and_distance = [[-73.9509714, 40.7237134], 1] events = self.Event.objects(location__within_distance=point_and_distance) assert events.count() == 0 # ensure ordering is respected by "within_distance" point_and_distance = [[-87.67892, 41.9120459], 10] events = self.Event.objects(location__within_distance=point_and_distance) events = events.order_by("-date") assert events.count() == 2 assert events[0] == event3 def test_within_box(self): """Ensure the "within_box" operator works.""" event1, event2, event3 = 
self._create_event_data() # check that within_box works box = [(-125.0, 35.0), (-100.0, 40.0)] events = self.Event.objects(location__within_box=box) assert events.count() == 1 assert events[0].id == event2.id def test_within_polygon(self): """Ensure the "within_polygon" operator works.""" event1, event2, event3 = self._create_event_data() polygon = [ (-87.694445, 41.912114), (-87.69084, 41.919395), (-87.681742, 41.927186), (-87.654276, 41.911731), (-87.656164, 41.898061), ] events = self.Event.objects(location__within_polygon=polygon) assert events.count() == 1 assert events[0].id == event1.id polygon2 = [ (-1.742249, 54.033586), (-1.225891, 52.792797), (-4.40094, 53.389881), ] events = self.Event.objects(location__within_polygon=polygon2) assert events.count() == 0 def test_2dsphere_near(self): """Make sure the "near" operator works with a PointField, which corresponds to a 2dsphere index. """ event1, event2, event3 = self._create_event_data(point_field_class=PointField) # find all events "near" pitchfork office, chicago. # note that "near" will show the san francisco event, too, # although it sorts to last. events = self.Event.objects(location__near=[-87.67892, 41.9120459]) if PYMONGO_VERSION < (4,): assert events.count() == 3 assert list(events) == [event1, event3, event2] # ensure ordering is respected by "near" events = self.Event.objects(location__near=[-87.67892, 41.9120459]) events = events.order_by("-date") if PYMONGO_VERSION < (4,): assert events.count() == 3 assert list(events) == [event3, event1, event2] def test_2dsphere_near_and_max_distance(self): """Ensure the "max_distance" operator works alongside the "near" operator with a 2dsphere index. 
""" event1, event2, event3 = self._create_event_data(point_field_class=PointField) # find events within 10km of san francisco point = [-122.415579, 37.7566023] events = self.Event.objects(location__near=point, location__max_distance=10000) if PYMONGO_VERSION < (4,): assert events.count() == 1 assert list(events) == [event2] # find events within 1km of greenpoint, broolyn, nyc, ny events = self.Event.objects( location__near=[-73.9509714, 40.7237134], location__max_distance=1000 ) if PYMONGO_VERSION < (4,): assert events.count() == 0 assert list(events) == [] # ensure ordering is respected by "near" events = self.Event.objects( location__near=[-87.67892, 41.9120459], location__max_distance=10000 ).order_by("-date") if PYMONGO_VERSION < (4,): assert events.count() == 2 assert list(events) == [event3, event1] def test_2dsphere_geo_within_box(self): """Ensure the "geo_within_box" operator works with a 2dsphere index. """ event1, event2, event3 = self._create_event_data(point_field_class=PointField) # check that within_box works box = [(-125.0, 35.0), (-100.0, 40.0)] events = self.Event.objects(location__geo_within_box=box) assert events.count() == 1 assert events[0].id == event2.id def test_2dsphere_geo_within_polygon(self): """Ensure the "geo_within_polygon" operator works with a 2dsphere index. 
""" event1, event2, event3 = self._create_event_data(point_field_class=PointField) polygon = [ (-87.694445, 41.912114), (-87.69084, 41.919395), (-87.681742, 41.927186), (-87.654276, 41.911731), (-87.656164, 41.898061), ] events = self.Event.objects(location__geo_within_polygon=polygon) assert events.count() == 1 assert events[0].id == event1.id polygon2 = [ (-1.742249, 54.033586), (-1.225891, 52.792797), (-4.40094, 53.389881), ] events = self.Event.objects(location__geo_within_polygon=polygon2) assert events.count() == 0 def test_2dsphere_near_and_min_max_distance(self): """Ensure "min_distance" and "max_distance" operators work well together with the "near" operator in a 2dsphere index. """ event1, event2, event3 = self._create_event_data(point_field_class=PointField) # ensure min_distance and max_distance combine well events = self.Event.objects( location__near=[-87.67892, 41.9120459], location__min_distance=1000, location__max_distance=10000, ).order_by("-date") if PYMONGO_VERSION < (4,): assert events.count() == 1 assert list(events) == [event3] # ensure ordering is respected by "near" with "min_distance" events = self.Event.objects( location__near=[-87.67892, 41.9120459], location__min_distance=10000 ).order_by("-date") if PYMONGO_VERSION < (4,): assert events.count() == 1 assert list(events) == [event2] def test_2dsphere_geo_within_center(self): """Make sure the "geo_within_center" operator works with a 2dsphere index. """ event1, event2, event3 = self._create_event_data(point_field_class=PointField) # find events within 5 degrees of pitchfork office, chicago point_and_distance = [[-87.67892, 41.9120459], 2] events = self.Event.objects(location__geo_within_center=point_and_distance) assert events.count() == 2 events = list(events) assert event2 not in events assert event1 in events assert event3 in events def _test_embedded(self, point_field_class): """Helper test method ensuring given point field class works well in an embedded document. 
""" class Venue(EmbeddedDocument): location = point_field_class() name = StringField() class Event(Document): title = StringField() venue = EmbeddedDocumentField(Venue) Event.drop_collection() venue1 = Venue(name="The Rock", location=[-87.677137, 41.909889]) venue2 = Venue(name="The Bridge", location=[-122.4194155, 37.7749295]) event1 = Event(title="Coltrane Motion @ Double Door", venue=venue1).save() event2 = Event( title="Coltrane Motion @ Bottom of the Hill", venue=venue2 ).save() event3 = Event(title="Coltrane Motion @ Empty Bottle", venue=venue1).save() # find all events "near" pitchfork office, chicago. # note that "near" will show the san francisco event, too, # although it sorts to last. events = Event.objects(venue__location__near=[-87.67892, 41.9120459]) if PYMONGO_VERSION < (4,): assert events.count() == 3 assert list(events) == [event1, event3, event2] def test_geo_spatial_embedded(self): """Make sure GeoPointField works properly in an embedded document.""" self._test_embedded(point_field_class=GeoPointField) def test_2dsphere_point_embedded(self): """Make sure PointField works properly in an embedded document.""" self._test_embedded(point_field_class=PointField) def test_spherical_geospatial_operators(self): """Ensure that spherical geospatial queries are working.""" class Point(Document): location = GeoPointField() Point.drop_collection() # These points are one degree apart, which (according to Google Maps) # is about 110 km apart at this place on the Earth. north_point = Point(location=[-122, 38]).save() # Near Concord, CA south_point = Point(location=[-122, 37]).save() # Near Santa Cruz, CA earth_radius = 6378.009 # in km (needs to be a float for dividing by) # Finds both points because they are within 60 km of the reference # point equidistant between them. 
points = Point.objects(location__near_sphere=[-122, 37.5]) if PYMONGO_VERSION < (4,): assert points.count() == 2 assert list(points) == [north_point, south_point] # Same behavior for _within_spherical_distance points = Point.objects( location__within_spherical_distance=[[-122, 37.5], 60 / earth_radius] ) assert points.count() == 2 points = Point.objects( location__near_sphere=[-122, 37.5], location__max_distance=60 / earth_radius ) if PYMONGO_VERSION < (4,): assert points.count() == 2 assert list(points) == [north_point, south_point] # Test query works with max_distance, being farer from one point points = Point.objects( location__near_sphere=[-122, 37.8], location__max_distance=60 / earth_radius ) close_point = points.first() if PYMONGO_VERSION < (4,): assert points.count() == 1 assert list(points) == [north_point] # Test query works with min_distance, being farer from one point points = Point.objects( location__near_sphere=[-122, 37.8], location__min_distance=60 / earth_radius ) if PYMONGO_VERSION < (4,): assert points.count() == 1 far_point = points.first() assert list(points) == [south_point] assert close_point != far_point # Finds both points, but orders the north point first because it's # closer to the reference point to the north. points = Point.objects(location__near_sphere=[-122, 38.5]) if PYMONGO_VERSION < (4,): assert points.count() == 2 assert list(points) == [north_point, south_point] # Finds both points, but orders the south point first because it's # closer to the reference point to the south. points = Point.objects(location__near_sphere=[-122, 36.5]) if PYMONGO_VERSION < (4,): assert points.count() == 2 assert list(points) == [south_point, north_point] # Finds only one point because only the first point is within 60km of # the reference point to the south. 
points = Point.objects( location__within_spherical_distance=[[-122, 36.5], 60 / earth_radius] ) assert points.count() == 1 assert points[0].id == south_point.id def test_linestring(self): class Road(Document): name = StringField() line = LineStringField() Road.drop_collection() road = Road(name="66", line=[[40, 5], [41, 6]]) road.save() # near point = {"type": "Point", "coordinates": [40, 5]} roads = Road.objects.filter(line__near=point["coordinates"]) if PYMONGO_VERSION < (4,): assert roads.count() == 1 assert list(roads) == [road] roads = Road.objects.filter(line__near=point) if PYMONGO_VERSION < (4,): assert roads.count() == 1 assert list(roads) == [road] roads = Road.objects.filter(line__near={"$geometry": point}) if PYMONGO_VERSION < (4,): assert roads.count() == 1 assert list(roads) == [road] # Within polygon = { "type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]], } roads = Road.objects.filter(line__geo_within=polygon["coordinates"]) assert roads.count() == 1 assert list(roads) == [road] roads = Road.objects.filter(line__geo_within=polygon) assert roads.count() == 1 assert list(roads) == [road] roads = Road.objects.filter(line__geo_within={"$geometry": polygon}) assert roads.count() == 1 assert list(roads) == [road] # Intersects line = {"type": "LineString", "coordinates": [[40, 5], [40, 6]]} roads = Road.objects.filter(line__geo_intersects=line["coordinates"]) assert roads.count() == 1 assert list(roads) == [road] roads = Road.objects.filter(line__geo_intersects=line) assert roads.count() == 1 assert list(roads) == [road] roads = Road.objects.filter(line__geo_intersects={"$geometry": line}) assert roads.count() == 1 assert list(roads) == [road] polygon = { "type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]], } roads = Road.objects.filter(line__geo_intersects=polygon["coordinates"]) assert roads.count() == 1 assert list(roads) == [road] roads = Road.objects.filter(line__geo_intersects=polygon) 
assert roads.count() == 1 assert list(roads) == [road] roads = Road.objects.filter(line__geo_intersects={"$geometry": polygon}) assert roads.count() == 1 assert list(roads) == [road] def test_polygon(self): class Road(Document): name = StringField() poly = PolygonField() Road.drop_collection() road = Road(name="66", poly=[[[40, 5], [40, 6], [41, 6], [40, 5]]]) road.save() # near point = {"type": "Point", "coordinates": [40, 5]} roads = Road.objects.filter(poly__near=point["coordinates"]) if PYMONGO_VERSION < (4,): assert roads.count() == 1 assert list(roads) == [road] roads = Road.objects.filter(poly__near=point) if PYMONGO_VERSION < (4,): assert roads.count() == 1 assert list(roads) == [road] roads = Road.objects.filter(poly__near={"$geometry": point}) if PYMONGO_VERSION < (4,): assert roads.count() == 1 assert list(roads) == [road] # Within polygon = { "type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]], } roads = Road.objects.filter(poly__geo_within=polygon["coordinates"]) assert roads.count() == 1 assert list(roads) == [road] roads = Road.objects.filter(poly__geo_within=polygon) assert roads.count() == 1 assert list(roads) == [road] roads = Road.objects.filter(poly__geo_within={"$geometry": polygon}) assert roads.count() == 1 assert list(roads) == [road] # Intersects line = {"type": "LineString", "coordinates": [[40, 5], [41, 6]]} roads = Road.objects.filter(poly__geo_intersects=line["coordinates"]) assert roads.count() == 1 assert list(roads) == [road] roads = Road.objects.filter(poly__geo_intersects=line) assert roads.count() == 1 assert list(roads) == [road] roads = Road.objects.filter(poly__geo_intersects={"$geometry": line}) assert roads.count() == 1 assert list(roads) == [road] polygon = { "type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]], } roads = Road.objects.filter(poly__geo_intersects=polygon["coordinates"]) assert roads.count() == 1 assert list(roads) == [road] roads = 
Road.objects.filter(poly__geo_intersects=polygon) assert roads.count() == 1 assert list(roads) == [road] roads = Road.objects.filter(poly__geo_intersects={"$geometry": polygon}) assert roads.count() == 1 assert list(roads) == [road] def test_aspymongo_with_only(self): """Ensure as_pymongo works with only""" class Place(Document): location = PointField() Place.drop_collection() p = Place(location=[24.946861267089844, 60.16311983618494]) p.save() qs = Place.objects().only("location") assert qs.as_pymongo()[0]["location"] == { "type": "Point", "coordinates": [24.946861267089844, 60.16311983618494], } def test_2dsphere_point_sets_correctly(self): class Location(Document): loc = PointField() Location.drop_collection() Location(loc=[1, 2]).save() loc = Location.objects.as_pymongo()[0] assert loc["loc"] == {"type": "Point", "coordinates": [1, 2]} Location.objects.update(set__loc=[2, 1]) loc = Location.objects.as_pymongo()[0] assert loc["loc"] == {"type": "Point", "coordinates": [2, 1]} def test_2dsphere_linestring_sets_correctly(self): class Location(Document): line = LineStringField() Location.drop_collection() Location(line=[[1, 2], [2, 2]]).save() loc = Location.objects.as_pymongo()[0] assert loc["line"] == {"type": "LineString", "coordinates": [[1, 2], [2, 2]]} Location.objects.update(set__line=[[2, 1], [1, 2]]) loc = Location.objects.as_pymongo()[0] assert loc["line"] == {"type": "LineString", "coordinates": [[2, 1], [1, 2]]} def test_geojson_PolygonField(self): class Location(Document): poly = PolygonField() Location.drop_collection() Location(poly=[[[40, 5], [40, 6], [41, 6], [40, 5]]]).save() loc = Location.objects.as_pymongo()[0] assert loc["poly"] == { "type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [40, 5]]], } Location.objects.update(set__poly=[[[40, 4], [40, 6], [41, 6], [40, 4]]]) loc = Location.objects.as_pymongo()[0] assert loc["poly"] == { "type": "Polygon", "coordinates": [[[40, 4], [40, 6], [41, 6], [40, 4]]], } if __name__ == "__main__": 
unittest.main()
TestGeoQueries
python
keon__algorithms
tests/test_dfs.py
{ "start": 1186, "end": 1725 }
class ____(unittest.TestCase): def test_pacific_atlantic(self): self.assertEqual([[0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]], pacific_atlantic([[1, 2, 2, 3, 5], [3, 2, 3, 4, 4], [2, 4, 5, 3, 1], [6, 7, 1, 4, 5], [5, 1, 1, 2, 4]]))
TestPacificAtlantic
python
plotly__plotly.py
_plotly_utils/basevalidators.py
{ "start": 59359, "end": 68855 }
class ____(BaseValidator): """ "info_array": { "description": "An {array} of plot information.", "requiredOpts": [ "items" ], "otherOpts": [ "dflt", "freeLength", "dimensions" ] } """ def __init__( self, plotly_name, parent_name, items, free_length=None, dimensions=None, **kwargs, ): super(InfoArrayValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, **kwargs ) self.items = items self.dimensions = dimensions if dimensions else 1 self.free_length = free_length # Instantiate validators for each info array element self.item_validators = [] info_array_items = self.items if isinstance(self.items, list) else [self.items] for i, item in enumerate(info_array_items): element_name = "{name}[{i}]".format(name=plotly_name, i=i) item_validator = InfoArrayValidator.build_validator( item, element_name, parent_name ) self.item_validators.append(item_validator) def description(self): # Cases # 1) self.items is array, self.dimensions is 1 # a) free_length=True # b) free_length=False # 2) self.items is array, self.dimensions is 2 # (requires free_length=True) # 3) self.items is scalar (requires free_length=True) # a) dimensions=1 # b) dimensions=2 # # dimensions can be set to '1-2' to indicate the both are accepted # desc = """\ The '{plotly_name}' property is an info array that may be specified as:\ """.format(plotly_name=self.plotly_name) if isinstance(self.items, list): # ### Case 1 ### if self.dimensions in (1, "1-2"): upto = " up to" if self.free_length and self.dimensions == 1 else "" desc += """ * a list or tuple of{upto} {N} elements where:\ """.format(upto=upto, N=len(self.item_validators)) for i, item_validator in enumerate(self.item_validators): el_desc = item_validator.description().strip() desc = ( desc + """ ({i}) {el_desc}""".format(i=i, el_desc=el_desc) ) # ### Case 2 ### if self.dimensions in ("1-2", 2): assert self.free_length desc += """ * a 2D list where:""" for i, item_validator in enumerate(self.item_validators): # Update name for 2d 
orig_name = item_validator.plotly_name item_validator.plotly_name = "{name}[i][{i}]".format( name=self.plotly_name, i=i ) el_desc = item_validator.description().strip() desc = ( desc + """ ({i}) {el_desc}""".format(i=i, el_desc=el_desc) ) item_validator.plotly_name = orig_name else: # ### Case 3 ### assert self.free_length item_validator = self.item_validators[0] orig_name = item_validator.plotly_name if self.dimensions in (1, "1-2"): item_validator.plotly_name = "{name}[i]".format(name=self.plotly_name) el_desc = item_validator.description().strip() desc += """ * a list of elements where: {el_desc} """.format(el_desc=el_desc) if self.dimensions in ("1-2", 2): item_validator.plotly_name = "{name}[i][j]".format( name=self.plotly_name ) el_desc = item_validator.description().strip() desc += """ * a 2D list where: {el_desc} """.format(el_desc=el_desc) item_validator.plotly_name = orig_name return desc @staticmethod def build_validator(validator_info, plotly_name, parent_name): datatype = validator_info["valType"] # type: str validator_classname = datatype.title().replace("_", "") + "Validator" validator_class = eval(validator_classname) kwargs = { k: validator_info[k] for k in validator_info if k not in ["valType", "description", "role"] } return validator_class( plotly_name=plotly_name, parent_name=parent_name, **kwargs ) def validate_element_with_indexed_name(self, val, validator, inds): """ Helper to add indexes to a validator's name, call validate_coerce on a value, then restore the original validator name. This makes sure that if a validation error message is raised, the property name the user sees includes the index(es) of the offending element. 
Parameters ---------- val: A value to be validated validator A validator inds List of one or more non-negative integers that represent the nested index of the value being validated Returns ------- val validated value Raises ------ ValueError if val fails validation """ orig_name = validator.plotly_name new_name = self.plotly_name for i in inds: new_name += "[" + str(i) + "]" validator.plotly_name = new_name try: val = validator.validate_coerce(val) finally: validator.plotly_name = orig_name return val def validate_coerce(self, v): if is_none_or_typed_array_spec(v): return None elif not is_array(v): self.raise_invalid_val(v) # Save off original v value to use in error reporting orig_v = v # Convert everything into nested lists # This way we don't need to worry about nested numpy arrays v = to_scalar_or_list(v) is_v_2d = v and is_array(v[0]) if is_v_2d and self.dimensions in ("1-2", 2): if is_array(self.items): # e.g. 2D list as parcoords.dimensions.constraintrange # check that all items are there for each nested element for i, row in enumerate(v): # Check row length if not is_array(row) or len(row) != len(self.items): self.raise_invalid_val(orig_v[i], [i]) for j, validator in enumerate(self.item_validators): row[j] = self.validate_element_with_indexed_name( v[i][j], validator, [i, j] ) else: # e.g. 2D list as layout.grid.subplots # check that all elements match individual validator validator = self.item_validators[0] for i, row in enumerate(v): if not is_array(row): self.raise_invalid_val(orig_v[i], [i]) for j, el in enumerate(row): row[j] = self.validate_element_with_indexed_name( el, validator, [i, j] ) elif v and self.dimensions == 2: # e.g. 1D list passed as layout.grid.subplots self.raise_invalid_val(orig_v[0], [0]) elif not is_array(self.items): # e.g. 
1D list passed as layout.grid.xaxes validator = self.item_validators[0] for i, el in enumerate(v): v[i] = self.validate_element_with_indexed_name(el, validator, [i]) elif not self.free_length and len(v) != len(self.item_validators): # e.g. 3 element list as layout.xaxis.range self.raise_invalid_val(orig_v) elif self.free_length and len(v) > len(self.item_validators): # e.g. 4 element list as layout.updatemenu.button.args self.raise_invalid_val(orig_v) else: # We have a 1D array of the correct length for i, (el, validator) in enumerate(zip(v, self.item_validators)): # Validate coerce elements v[i] = validator.validate_coerce(el) return v def present(self, v): if v is None: return None else: if ( self.dimensions == 2 or self.dimensions == "1-2" and v and is_array(v[0]) ): # 2D case v = copy.deepcopy(v) for row in v: for i, (el, validator) in enumerate(zip(row, self.item_validators)): row[i] = validator.present(el) return tuple(tuple(row) for row in v) else: # 1D case v = copy.copy(v) # Call present on each of the item validators for i, (el, validator) in enumerate(zip(v, self.item_validators)): # Validate coerce elements v[i] = validator.present(el) # Return tuple form of return tuple(v)
InfoArrayValidator
python
getsentry__sentry
tests/sentry/api/endpoints/release_thresholds/test_release_thresholds_index.py
{ "start": 178, "end": 5032 }
class ____(APITestCase): endpoint = "sentry-api-0-organization-release-thresholds" method = "get" def setUp(self) -> None: super().setUp() self.user = self.create_user(is_staff=True, is_superuser=True) self.login_as(user=self.user) self.canary_environment = Environment.objects.create( organization_id=self.organization.id, name="canary" ) self.production_environment = Environment.objects.create( organization_id=self.organization.id, name="production" ) def test_get_invalid_project(self) -> None: self.get_error_response(self.organization.slug, project="foo bar") def test_get_no_project(self) -> None: self.get_error_response(self.organization.slug) def test_get_valid_project(self) -> None: ReleaseThreshold.objects.create( threshold_type=0, trigger_type=0, value=100, window_in_seconds=1800, project=self.project, environment=self.canary_environment, ) response = self.get_success_response(self.organization.slug, project=self.project.id) assert len(response.data) == 1 created_threshold = response.data[0] assert created_threshold["threshold_type"] == "total_error_count" assert created_threshold["trigger_type"] == "over" assert created_threshold["value"] == 100 assert created_threshold["window_in_seconds"] == 1800 assert created_threshold["project"]["id"] == str(self.project.id) assert created_threshold["project"]["slug"] == self.project.slug assert created_threshold["project"]["name"] == self.project.name assert created_threshold["environment"]["id"] == str(self.canary_environment.id) assert created_threshold["environment"]["name"] == self.canary_environment.name def test_get_invalid_environment(self) -> None: self.get_error_response(self.organization.slug, environment="foo bar", project="-1") def test_get_valid_no_environment(self) -> None: response = self.get_success_response(self.organization.slug, project="-1") ReleaseThreshold.objects.create( threshold_type=0, trigger_type=0, value=100, window_in_seconds=1800, project=self.project, environment=self.canary_environment, 
) response = self.get_success_response(self.organization.slug, project=self.project.id) assert len(response.data) == 1 created_threshold = response.data[0] assert created_threshold["threshold_type"] == "total_error_count" assert created_threshold["trigger_type"] == "over" assert created_threshold["value"] == 100 assert created_threshold["window_in_seconds"] == 1800 assert created_threshold["project"]["id"] == str(self.project.id) assert created_threshold["project"]["slug"] == self.project.slug assert created_threshold["project"]["name"] == self.project.name assert created_threshold["environment"]["id"] == str(self.canary_environment.id) assert created_threshold["environment"]["name"] == self.canary_environment.name def test_get_valid_with_environment(self) -> None: response = self.get_success_response( self.organization.slug, project="-1", environment="canary" ) ReleaseThreshold.objects.create( threshold_type=0, trigger_type=0, value=100, window_in_seconds=1800, project=self.project, environment=self.canary_environment, ) ReleaseThreshold.objects.create( threshold_type=0, trigger_type=1, value=100, window_in_seconds=1800, project=self.project, environment=self.production_environment, ) response = self.get_success_response( self.organization.slug, project="-1", environment="canary" ) assert response.status_code == 200 assert len(response.data) == 1 created_threshold = response.data[0] assert created_threshold["threshold_type"] == "total_error_count" assert created_threshold["trigger_type"] == "over" assert created_threshold["value"] == 100 assert created_threshold["window_in_seconds"] == 1800 assert created_threshold["project"]["id"] == str(self.project.id) assert created_threshold["project"]["slug"] == self.project.slug assert created_threshold["project"]["name"] == self.project.name assert created_threshold["environment"]["id"] == str(self.canary_environment.id) assert created_threshold["environment"]["name"] == self.canary_environment.name
ReleaseThresholdTest
python
pyqtgraph__pyqtgraph
pyqtgraph/widgets/CheckTable.py
{ "start": 90, "end": 3365 }
class ____(QtWidgets.QWidget): sigStateChanged = QtCore.Signal(object, object, object) # (row, col, state) def __init__(self, columns): QtWidgets.QWidget.__init__(self) self.layout = QtWidgets.QGridLayout() self.layout.setSpacing(0) self.setLayout(self.layout) self.headers = [] self.columns = columns col = 1 for c in columns: label = VerticalLabel.VerticalLabel(c, orientation='vertical') self.headers.append(label) self.layout.addWidget(label, 0, col) col += 1 self.rowNames = [] self.rowWidgets = [] self.oldRows = {} ## remember settings from removed rows; reapply if they reappear. def updateRows(self, rows): for r in self.rowNames[:]: if r not in rows: self.removeRow(r) for r in rows: if r not in self.rowNames: self.addRow(r) def addRow(self, name): label = QtWidgets.QLabel(name) row = len(self.rowNames)+1 self.layout.addWidget(label, row, 0) checks = [] col = 1 for c in self.columns: check = QtWidgets.QCheckBox('') check.col = c check.row = name self.layout.addWidget(check, row, col) checks.append(check) if name in self.oldRows: check.setChecked(self.oldRows[name][col]) col += 1 #QtCore.QObject.connect(check, QtCore.SIGNAL('stateChanged(int)'), self.checkChanged) check.stateChanged.connect(self.checkChanged) self.rowNames.append(name) self.rowWidgets.append([label] + checks) def removeRow(self, name): row = self.rowNames.index(name) self.oldRows[name] = self.saveState()['rows'][row] ## save for later self.rowNames.pop(row) for w in self.rowWidgets[row]: w.setParent(None) #QtCore.QObject.disconnect(w, QtCore.SIGNAL('stateChanged(int)'), self.checkChanged) if isinstance(w, QtWidgets.QCheckBox): w.stateChanged.disconnect(self.checkChanged) self.rowWidgets.pop(row) for i in range(row, len(self.rowNames)): widgets = self.rowWidgets[i] for j in range(len(widgets)): widgets[j].setParent(None) self.layout.addWidget(widgets[j], i+1, j) def checkChanged(self, state): check = QtCore.QObject.sender(self) #self.emit(QtCore.SIGNAL('stateChanged'), check.row, check.col, state) 
self.sigStateChanged.emit(check.row, check.col, state) def saveState(self): rows = [] for i in range(len(self.rowNames)): row = [self.rowNames[i]] + [c.isChecked() for c in self.rowWidgets[i][1:]] rows.append(row) return {'cols': self.columns, 'rows': rows} def restoreState(self, state): rows = [r[0] for r in state['rows']] self.updateRows(rows) for r in state['rows']: rowNum = self.rowNames.index(r[0]) for i in range(1, len(r)): self.rowWidgets[rowNum][i].setChecked(r[i])
CheckTable
python
getsentry__sentry
src/sentry/integrations/analytics.py
{ "start": 1129, "end": 1305 }
class ____(analytics.Event): provider: str id: int organization_id: int @analytics.eventclass("integration.issue.comments.synced")
IntegrationIssueAssigneeSyncedEvent
python
ZoranPandovski__al-go-rithms
data_structures/Tree/splay_tree/python/splay_tree.py
{ "start": 5218, "end": 6528 }
class ____(unittest.TestCase): def setUp(self): self.keys = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] self.t = SplayTree() for key in self.keys: self.t.insert(key) #self.t.printTree() def testInsert(self): for key in self.keys: self.assertEqual(key, self.t.find(key).key) def testRemove(self): for key in self.keys: self.t.remove(key) self.assertEqual(self.t.find(key), None) def testLargeInserts(self): t = SplayTree() nums = 40000 gap = 307 i = gap while i != 0: t.insert(i) i = (i + gap) % nums def testIsEmpty(self): self.assertFalse(self.t.isEmpty()) t = SplayTree() self.assertTrue(t.isEmpty()) def testMinMax(self): self.assertEqual(self.t.findMin().key, 0) self.assertEqual(self.t.findMax().key, 9) if __name__ == "__main__": keys = [1, 5, 2, 9, 4, 0, 6, 7, 8, 3] t = SplayTree() for key in keys: t.insert(key) t.printTree() t.insert(5) t.printTree() t.remove(0) t.printTree() for key in [2,4,6,8]: print(key, t.find(key)) t.printTree() print(t.findMin()) print(t.findMax()) unittest.main()
TestCase
python
pytorch__pytorch
test/distributed/test_c10d_nccl.py
{ "start": 197893, "end": 200427 }
class ____(MultiProcessTestCase): @property def world_size(self): return 1 def setUp(self): super().setUp() # TORCH_NCCL_BLOCKING_WAIT overrides TORCH_NCCL_ASYNC_ERROR_HANDLING hence tests # that use TORCH_NCCL_BLOCKING_WAIT will test it as expected. os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "1" # self.num_gpus = torch.cuda.device_count() self._spawn_processes() def tearDown(self): super().tearDown() try: os.remove(self.file_name) except OSError: pass class ToyModel(nn.Module): def __init__(self, rank, vocab_size, embedding_dim): super().__init__() self.embedding = nn.Embedding(vocab_size, embedding_dim, sparse=True).to( rank ) self.linear = nn.Linear(embedding_dim, 1).to(rank) def forward(self, inputs): embedded = self.embedding(inputs) # embedded shape: (batch_size, sequence_length, embedding_dim) flattened = torch.mean(embedded, dim=1) # flattened shape: (batch_size, embedding_dim) output = self.linear(flattened) # output shape: (batch_size, 1) return output @requires_nccl() @skip_if_lt_x_gpu(1) def test_ddp_set_sparse_metadata(self): store = dist.FileStore(self.file_name, self.world_size) dist.init_process_group( "nccl", world_size=self.world_size, rank=self.rank, store=store, ) vocab_size = 5 model = SparseCollective.ToyModel( self.rank, vocab_size=vocab_size, embedding_dim=10 ) ddp_model = DistributedDataParallel(model) inputs = torch.tensor([[1, 0, 0], [0, 0, 0], [0, 0, 0]]).to(self.rank) # set sparse metadata on the DDP model indices = torch.Tensor(list(range(vocab_size))) ddp_model._set_sparse_metadata({"embedding.weight": indices}) # forward pass try: output = ddp_model(inputs) loss = output.sum() # backward pass loss.backward() self.assertTrue(ddp_model.module.embedding.weight.grad.indices, indices) except RuntimeError as e: if "NCCL does not support all_reduce with sparse tensors" in str(e): pass else: # Rethrow the exception if it's a different error raise
SparseCollective
python
apache__airflow
providers/teradata/tests/unit/teradata/transfers/test_azure_blob_to_teradata.py
{ "start": 1105, "end": 2481 }
class ____: def test_init(self): operator = AzureBlobStorageToTeradataOperator( azure_conn_id=AZURE_CONN_ID, teradata_conn_id=TERADATA_CONN_ID, teradata_table=TERADATA_TABLE, blob_source_key=BLOB_SOURCE_KEY, task_id=TASK_ID, ) assert operator.azure_conn_id == AZURE_CONN_ID assert operator.blob_source_key == BLOB_SOURCE_KEY assert operator.teradata_conn_id == TERADATA_CONN_ID assert operator.teradata_table == TERADATA_TABLE assert operator.task_id == TASK_ID @mock.patch("airflow.providers.teradata.transfers.azure_blob_to_teradata.TeradataHook") @mock.patch("airflow.providers.teradata.transfers.azure_blob_to_teradata.WasbHook") def test_execute(self, mock_hook_wasb, mock_hook_teradata): op = AzureBlobStorageToTeradataOperator( azure_conn_id=AZURE_CONN_ID, teradata_conn_id=TERADATA_CONN_ID, teradata_table=TERADATA_TABLE, blob_source_key=BLOB_SOURCE_KEY, task_id=TASK_ID, ) op.execute(context=None) mock_hook_wasb.assert_called_once_with(wasb_conn_id=AZURE_CONN_ID) mock_hook_teradata.assert_called_once_with(teradata_conn_id=TERADATA_CONN_ID) sql = "SQL" mock_hook_teradata.run(sql)
TestAzureBlobStorageToTeradataOperator
python
django__django
tests/model_meta/tests.py
{ "start": 6327, "end": 6620 }
class ____(OptionsBaseTests): def test_private_fields(self): for model, expected_names in TEST_RESULTS["private_fields"].items(): objects = model._meta.private_fields self.assertEqual(sorted(f.name for f in objects), sorted(expected_names))
PrivateFieldsTests
python
huggingface__transformers
src/transformers/models/mt5/modeling_mt5.py
{ "start": 29024, "end": 36781 }
class ____(MT5PreTrainedModel): def __init__(self, config): super().__init__(config) self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model) self.is_decoder = config.is_decoder self.block = nn.ModuleList( [MT5Block(config, has_relative_attention_bias=bool(i == 0), layer_idx=i) for i in range(config.num_layers)] ) self.final_layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) # Initialize weights and apply final processing self.post_init() self.gradient_checkpointing = False def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None, ): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with 
gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False if inputs_embeds is None: if self.embed_tokens is None: raise ValueError("You have to initialize the model with valid token embeddings") inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape if use_cache is True: if not self.is_decoder: raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder") if self.is_decoder: if use_cache and past_key_values is None: if self.config.is_encoder_decoder: past_key_values = EncoderDecoderCache( DynamicCache(config=self.config), DynamicCache(config=self.config) ) else: past_key_values = DynamicCache(config=self.config) elif not self.is_decoder: # do not pass cache object down the line for encoder stack # it messes indexing later in decoder-stack because cache object is modified in-place past_key_values = None past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange( past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device ) if self.config.is_decoder: attention_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values, ) else: attention_mask = create_bidirectional_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, ) encoder_extended_attention_mask = None if self.is_decoder and encoder_hidden_states is not None: encoder_extended_attention_mask = create_bidirectional_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, ) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if 
(output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for layer_module in self.block: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, # as a positional argument for gradient checkpointing past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, return_dict=return_dict, cache_position=cache_position, ) hidden_states = layer_outputs[0] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[1] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2] if output_attentions: all_attentions = all_attentions + (layer_outputs[2],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[4],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, past_key_values, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) @auto_docstring
MT5Stack
python
kamyu104__LeetCode-Solutions
Python/longest-subarray-of-1s-after-deleting-one-element.py
{ "start": 29, "end": 429 }
class ____(object): def longestSubarray(self, nums): """ :type nums: List[int] :rtype: int """ count, left = 0, 0 for right in xrange(len(nums)): count += (nums[right] == 0) if count >= 2: count -= (nums[left] == 0) left += 1 return (right-left+1)-1 # Time: O(n) # Space: O(1)
Solution
python
joblib__joblib
joblib/parallel.py
{ "start": 23278, "end": 26711 }
class ____(object): """Wrap a sequence of (func, args, kwargs) tuples as a single callable""" def __init__( self, iterator_slice, backend_and_jobs, reducer_callback=None, pickle_cache=None ): self.items = list(iterator_slice) self._size = len(self.items) self._reducer_callback = reducer_callback if isinstance(backend_and_jobs, tuple): self._backend, self._n_jobs = backend_and_jobs else: # this is for backward compatibility purposes. Before 0.12.6, # nested backends were returned without n_jobs indications. self._backend, self._n_jobs = backend_and_jobs, None self._pickle_cache = pickle_cache if pickle_cache is not None else {} def __call__(self): # Set the default nested backend to self._backend but do not set the # change the default number of processes to -1 with parallel_config(backend=self._backend, n_jobs=self._n_jobs): return [func(*args, **kwargs) for func, args, kwargs in self.items] def __reduce__(self): if self._reducer_callback is not None: self._reducer_callback() # no need to pickle the callback. return ( BatchedCalls, (self.items, (self._backend, self._n_jobs), None, self._pickle_cache), ) def __len__(self): return self._size # Possible exit status for a task TASK_DONE = "Done" TASK_ERROR = "Error" TASK_PENDING = "Pending" ############################################################################### # CPU count that works also when multiprocessing has been disabled via # the JOBLIB_MULTIPROCESSING environment variable def cpu_count(only_physical_cores=False): """Return the number of CPUs. This delegates to loky.cpu_count that takes into account additional constraints such as Linux CFS scheduler quotas (typically set by container runtimes such as docker) and CPU affinity (for instance using the taskset command on Linux). Parameters ---------- only_physical_cores : boolean, default=False If True, does not take hyperthreading / SMT logical cores into account. 
""" if mp is None: return 1 return loky.cpu_count(only_physical_cores=only_physical_cores) ############################################################################### # For verbosity def _verbosity_filter(index, verbose): """Returns False for indices increasingly apart, the distance depending on the value of verbose. We use a lag increasing as the square of index """ if not verbose: return True elif verbose > 10: return False if index == 0: return False verbose = 0.5 * (11 - verbose) ** 2 scale = sqrt(index / verbose) next_scale = sqrt((index + 1) / verbose) return int(next_scale) == int(scale) ############################################################################### def delayed(function): """Decorator used to capture the arguments of a function.""" def delayed_function(*args, **kwargs): return function, args, kwargs try: delayed_function = functools.wraps(function)(delayed_function) except AttributeError: " functools.wraps fails on some callable objects " return delayed_function ###############################################################################
BatchedCalls
python
getsentry__sentry
tests/sentry/tasks/test_relay.py
{ "start": 12577, "end": 18633 }
class ____: def test_debounce( self, default_project, default_organization, invalidation_debounce_cache, django_cache, ): tasks = [] def apply_async(args=None, kwargs=None, countdown=None): assert not args tasks.append(kwargs) with mock.patch("sentry.tasks.relay.invalidate_project_config.apply_async", apply_async): invalidation_debounce_cache.mark_task_done( public_key=None, project_id=default_project.id, organization_id=None ) schedule_invalidate_project_config(project_id=default_project.id, trigger="test") schedule_invalidate_project_config(project_id=default_project.id, trigger="test") invalidation_debounce_cache.mark_task_done( public_key=None, project_id=None, organization_id=default_organization.id ) schedule_invalidate_project_config( organization_id=default_organization.id, trigger="test", trigger_details="more test" ) schedule_invalidate_project_config( organization_id=default_organization.id, trigger="test", trigger_details="more test" ) assert tasks == [ { "project_id": default_project.id, "organization_id": None, "public_key": None, "trigger": "test", "trigger_details": None, }, { "project_id": None, "organization_id": default_organization.id, "public_key": None, "trigger": "test", "trigger_details": "more test", }, ] def test_invalidate( self, default_project, default_organization, default_projectkey, task_runner, redis_cache, django_cache, ): cfg = {"dummy-key": "val"} redis_cache.set_many({default_projectkey.public_key: cfg}) assert redis_cache.get(default_projectkey.public_key) == cfg with task_runner(): schedule_invalidate_project_config(project_id=default_project.id, trigger="test") for cache_key in _cache_keys_for_project(default_project): cfg_from_cache = redis_cache.get(cache_key) assert "dummy-key" not in cfg_from_cache assert cfg_from_cache["disabled"] is False assert cfg_from_cache["projectId"] == default_project.id def test_invalidate_org( self, default_project, default_organization, default_projectkey, redis_cache, task_runner, 
django_cache, ): # Currently for org-wide we delete the config instead of computing it. cfg = {"dummy-key": "val"} redis_cache.set_many({default_projectkey.public_key: cfg}) assert redis_cache.get(default_projectkey.public_key) == cfg with task_runner(): schedule_invalidate_project_config( organization_id=default_organization.id, trigger="test" ) for cache_key in _cache_keys_for_project(default_project): new_cfg = redis_cache.get(cache_key) assert new_cfg is not None assert new_cfg != cfg @mock.patch( "sentry.tasks.relay._schedule_invalidate_project_config", wraps=_schedule_invalidate_project_config, ) @mock.patch("django.db.transaction.on_commit", wraps=transaction.on_commit) def test_project_config_invalidations_after_commit( self, oncommit, schedule_inner, default_project, ): schedule_invalidate_project_config( trigger="test", project_id=default_project.id, countdown=2 ) assert oncommit.call_count == 1 assert schedule_inner.call_count == 1 assert schedule_inner.call_args == mock.call( trigger="test", trigger_details=None, organization_id=None, project_id=default_project.id, public_key=None, countdown=2, ) @mock.patch("sentry.tasks.relay._schedule_invalidate_project_config") def test_project_config_invalidations_delayed( self, schedule_inner, default_project, ): with transaction.atomic(router.db_for_write(ProjectOption)): schedule_invalidate_project_config( trigger="inside-transaction", project_id=default_project, countdown=2 ) assert schedule_inner.call_count == 0 assert schedule_inner.call_count == 1 schedule_invalidate_project_config( trigger="outside-transaction", project_id=default_project, countdown=2 ) assert schedule_inner.call_count == 2 @override_options({"taskworker.enabled": True}) @django_db_all(transaction=True) @thread_leak_allowlist(reason="relay integration tests", issue=97040) def test_invalidate_hierarchy( default_project, default_projectkey, redis_cache, debounce_cache, invalidation_debounce_cache, django_cache, ): # Put something in the 
cache, otherwise the invalidation task won't compute anything. redis_cache.set_many({default_projectkey.public_key: {"dummy": "dummy"}}) orig_apply_async = invalidate_project_config.apply_async calls = [] def proxy(*args, **kwargs): calls.append((args, kwargs)) orig_apply_async(*args, **kwargs) with ( mock.patch.object(invalidate_project_config, "apply_async", proxy), BurstTaskRunner() as run, ): schedule_invalidate_project_config( organization_id=default_project.organization.id, trigger="test" ) schedule_invalidate_project_config(project_id=default_project.id, trigger="test") run(max_jobs=10) assert len(calls) == 1 cache = redis_cache.get(default_projectkey) assert cache["disabled"] is False
TestInvalidationTask
python
openai__openai-python
src/openai/types/responses/response_output_text.py
{ "start": 2495, "end": 2810 }
class ____(BaseModel): annotations: List[Annotation] """The annotations of the text output.""" text: str """The text output from the model.""" type: Literal["output_text"] """The type of the output text. Always `output_text`.""" logprobs: Optional[List[Logprob]] = None
ResponseOutputText
python
pypa__pipenv
pipenv/patched/pip/_vendor/distlib/wheel.py
{ "start": 3625, "end": 4732 }
class ____(object): def __init__(self): self.impure_wheels = {} self.libs = {} def add(self, pathname, extensions): self.impure_wheels[pathname] = extensions self.libs.update(extensions) def remove(self, pathname): extensions = self.impure_wheels.pop(pathname) for k, v in extensions: if k in self.libs: del self.libs[k] def find_module(self, fullname, path=None): if fullname in self.libs: result = self else: result = None return result def load_module(self, fullname): if fullname in sys.modules: result = sys.modules[fullname] else: if fullname not in self.libs: raise ImportError('unable to find extension for %s' % fullname) result = _load_dynamic(fullname, self.libs[fullname]) result.__loader__ = self parts = fullname.rsplit('.', 1) if len(parts) > 1: result.__package__ = parts[0] return result _hook = Mounter()
Mounter
python
conda__conda
conda/common/io.py
{ "start": 2751, "end": 3265 }
class ____(ContextDecorator): # Ignore BrokenPipeError and errors related to stdout or stderr being # closed by a downstream program. def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): if ( exc_val and isinstance(exc_val, EnvironmentError) and getattr(exc_val, "errno", None) and exc_val.errno in (EPIPE, ESHUTDOWN) ): return True swallow_broken_pipe = SwallowBrokenPipe()
SwallowBrokenPipe
python
jazzband__django-simple-history
simple_history/tests/tests/test_models.py
{ "start": 63752, "end": 64176 }
class ____(TestCase): def test_restore_pollwithexclude(self): poll = PollWithExcludeFields.objects.create( question="what's up?", pub_date=today ) historical = poll.history.order_by("pk")[0] with self.assertRaises(AttributeError): historical.pub_date original = historical.instance self.assertEqual(original.pub_date, poll.pub_date)
ExcludeFieldsTest
python
doocs__leetcode
solution/2800-2899/2864.Maximum Odd Binary Number/Solution.py
{ "start": 0, "end": 156 }
class ____: def maximumOddBinaryNumber(self, s: str) -> str: cnt = s.count("1") return "1" * (cnt - 1) + (len(s) - cnt) * "0" + "1"
Solution
python
google__flatbuffers
tests/py_test.py
{ "start": 29432, "end": 34837 }
class ____(unittest.TestCase): """Low level stress/fuzz test: serialize/deserialize a variety of different kinds of data in different combinations """ binary_type = compat.binary_types[0] # this will always exist ofInt32Bytes = binary_type([0x83, 0x33, 0x33, 0x33]) ofInt64Bytes = binary_type([0x84, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44]) overflowingInt32Val = flatbuffers.encode.Get( flatbuffers.packer.int32, ofInt32Bytes, 0 ) overflowingInt64Val = flatbuffers.encode.Get( flatbuffers.packer.int64, ofInt64Bytes, 0 ) # Values we're testing against: chosen to ensure no bits get chopped # off anywhere, and also be different from eachother. boolVal = True int8Val = N.Int8Flags.py_type(-127) # 0x81 uint8Val = N.Uint8Flags.py_type(0xFF) int16Val = N.Int16Flags.py_type(-32222) # 0x8222 uint16Val = N.Uint16Flags.py_type(0xFEEE) int32Val = N.Int32Flags.py_type(overflowingInt32Val) uint32Val = N.Uint32Flags.py_type(0xFDDDDDDD) int64Val = N.Int64Flags.py_type(overflowingInt64Val) uint64Val = N.Uint64Flags.py_type(0xFCCCCCCCCCCCCCCC) # Python uses doubles, so force it here float32Val = N.Float32Flags.py_type(ctypes.c_float(3.14159).value) float64Val = N.Float64Flags.py_type(3.14159265359) def test_fuzz(self): return self.check_once(11, 100) def check_once(self, fuzzFields, fuzzObjects): testValuesMax = 11 # hardcoded to the number of scalar types builder = flatbuffers.Builder(0) l = LCG() objects = [0 for _ in compat_range(fuzzObjects)] # Generate fuzzObjects random objects each consisting of # fuzzFields fields, each of a random type. 
for i in compat_range(fuzzObjects): builder.StartObject(fuzzFields) for j in compat_range(fuzzFields): choice = int(l.Next()) % testValuesMax if choice == 0: builder.PrependBoolSlot(int(j), self.boolVal, False) elif choice == 1: builder.PrependInt8Slot(int(j), self.int8Val, 0) elif choice == 2: builder.PrependUint8Slot(int(j), self.uint8Val, 0) elif choice == 3: builder.PrependInt16Slot(int(j), self.int16Val, 0) elif choice == 4: builder.PrependUint16Slot(int(j), self.uint16Val, 0) elif choice == 5: builder.PrependInt32Slot(int(j), self.int32Val, 0) elif choice == 6: builder.PrependUint32Slot(int(j), self.uint32Val, 0) elif choice == 7: builder.PrependInt64Slot(int(j), self.int64Val, 0) elif choice == 8: builder.PrependUint64Slot(int(j), self.uint64Val, 0) elif choice == 9: builder.PrependFloat32Slot(int(j), self.float32Val, 0) elif choice == 10: builder.PrependFloat64Slot(int(j), self.float64Val, 0) else: raise RuntimeError('unreachable') off = builder.EndObject() # store the offset from the end of the builder buffer, # since it will keep growing: objects[i] = off # Do some bookkeeping to generate stats on fuzzes: stats = defaultdict(int) def check(table, desc, want, got): stats[desc] += 1 self.assertEqual(want, got, '%s != %s, %s' % (want, got, desc)) l = LCG() # Reset. # Test that all objects we generated are readable and return the # expected values. We generate random objects in the same order # so this is deterministic. 
for i in compat_range(fuzzObjects): table = flatbuffers.table.Table( builder.Bytes, len(builder.Bytes) - objects[i] ) for j in compat_range(fuzzFields): field_count = flatbuffers.builder.VtableMetadataFields + j f = N.VOffsetTFlags.py_type(field_count * N.VOffsetTFlags.bytewidth) choice = int(l.Next()) % testValuesMax if choice == 0: check( table, 'bool', self.boolVal, table.GetSlot(f, False, N.BoolFlags) ) elif choice == 1: check(table, '<i1', self.int8Val, table.GetSlot(f, 0, N.Int8Flags)) elif choice == 2: check(table, '<u1', self.uint8Val, table.GetSlot(f, 0, N.Uint8Flags)) elif choice == 3: check(table, '<i2', self.int16Val, table.GetSlot(f, 0, N.Int16Flags)) elif choice == 4: check( table, '<u2', self.uint16Val, table.GetSlot(f, 0, N.Uint16Flags) ) elif choice == 5: check(table, '<i4', self.int32Val, table.GetSlot(f, 0, N.Int32Flags)) elif choice == 6: check( table, '<u4', self.uint32Val, table.GetSlot(f, 0, N.Uint32Flags) ) elif choice == 7: check(table, '<i8', self.int64Val, table.GetSlot(f, 0, N.Int64Flags)) elif choice == 8: check( table, '<u8', self.uint64Val, table.GetSlot(f, 0, N.Uint64Flags) ) elif choice == 9: check( table, '<f4', self.float32Val, table.GetSlot(f, 0, N.Float32Flags) ) elif choice == 10: check( table, '<f8', self.float64Val, table.GetSlot(f, 0, N.Float64Flags) ) else: raise RuntimeError('unreachable') # If enough checks were made, verify that all scalar types were used: self.assertEqual( testValuesMax, len(stats), 'fuzzing failed to test all scalar types: %s' % stats, )
TestFuzz
python
getsentry__sentry
src/sentry/sentry_metrics/indexer/strings.py
{ "start": 10731, "end": 14454 }
class ____(StringIndexer): """ Wrapper for static strings """ def __init__(self, indexer: StringIndexer) -> None: self.indexer = indexer def bulk_record( self, strings: Mapping[UseCaseID, Mapping[OrgId, set[str]]] ) -> UseCaseKeyResults: static_keys = UseCaseKeyCollection(strings) static_key_results = UseCaseKeyResults() for use_case_id, org_id, string in static_keys.as_tuples(): if string in SHARED_STRINGS: id = SHARED_STRINGS[string] static_key_results.add_use_case_key_result( UseCaseKeyResult(use_case_id, org_id, string, id), FetchType.HARDCODED ) org_strings_left = static_key_results.get_unmapped_use_case_keys(static_keys) if org_strings_left.size == 0: return static_key_results indexer_results = self.indexer.bulk_record( { use_case_id: key_collection.mapping for use_case_id, key_collection in org_strings_left.mapping.items() } ) return static_key_results.merge(indexer_results) def record(self, use_case_id: UseCaseID, org_id: int, string: str) -> int | None: if string in SHARED_STRINGS: return SHARED_STRINGS[string] return self.indexer.record(use_case_id=use_case_id, org_id=org_id, string=string) @metric_path_key_compatible_resolve def resolve(self, use_case_id: UseCaseID, org_id: int, string: str) -> int | None: # TODO: remove this metric after investigation is over if use_case_id is UseCaseID.ESCALATING_ISSUES: metrics.incr("sentry_metrics.indexer.string_indexer_resolve_escalating_issues") if string in SHARED_STRINGS: return SHARED_STRINGS[string] return self.indexer.resolve(use_case_id, org_id, string) @metric_path_key_compatible_rev_resolve def reverse_resolve(self, use_case_id: UseCaseID, org_id: int, id: int) -> str | None: if id in REVERSE_SHARED_STRINGS: return REVERSE_SHARED_STRINGS[id] resolved_id = self.indexer.reverse_resolve(use_case_id, org_id, id) if resolved_id is None: # HACK: if a string gets re-indexed we need to have some way to look # up the old id and we do it this way because the table has a unique # constraint on the org_id and the 
string. reindexed_ints = settings.SENTRY_METRICS_INDEXER_REINDEXED_INTS if id in reindexed_ints: return reindexed_ints[id] return resolved_id def bulk_reverse_resolve( self, use_case_id: UseCaseID, org_id: int, ids: Collection[int] ) -> Mapping[int, str]: shared_strings: dict[int, str] = {} unresolved_ids = [] for ident in ids: if ident in REVERSE_SHARED_STRINGS: # resolved the shared string shared_strings[ident] = REVERSE_SHARED_STRINGS[ident] else: # remember the position of the strings we need to resolve unresolved_ids.append(ident) # insert the strings resolved by the base indexer in the global result org_strings = self.indexer.bulk_reverse_resolve(use_case_id, org_id, unresolved_ids) return {**org_strings, **shared_strings} def resolve_shared_org(self, string: str) -> int | None: if string in SHARED_STRINGS: return SHARED_STRINGS[string] return None def reverse_shared_org_resolve(self, id: int) -> str | None: if id in REVERSE_SHARED_STRINGS: return REVERSE_SHARED_STRINGS[id] return None
StaticStringIndexer
python
encode__django-rest-framework
tests/browsable_api/test_browsable_api.py
{ "start": 336, "end": 1270 }
class ____(TestCase): """Tests correct handling of anonymous user request on endpoints with IsAuthenticated permission class.""" def setUp(self): self.client = APIClient(enforce_csrf_checks=True) def tearDown(self): self.client.logout() def test_get_raises_typeerror_when_anonymous_user_in_queryset_filter(self): with self.assertRaises(TypeError): self.client.get('/basicviewset') def test_get_returns_http_forbidden_when_anonymous_user(self): old_permissions = BasicModelWithUsersViewSet.permission_classes BasicModelWithUsersViewSet.permission_classes = [IsAuthenticated, OrganizationPermissions] response = self.client.get('/basicviewset') BasicModelWithUsersViewSet.permission_classes = old_permissions self.assertEqual(response.status_code, 403) @override_settings(ROOT_URLCONF='tests.browsable_api.auth_urls')
AnonymousUserTests
python
pytorch__pytorch
torch/_dynamo/variables/misc.py
{ "start": 81538, "end": 82789 }
class ____(VariableTracker): @staticmethod def build(tx, weakref_value, **options): source = options.get("source") callback = weakref_value.__callback__ callback_source = source and AttrSource(source, "__callback__") callback_vt = VariableTracker.build(tx, callback, callback_source) referent = weakref_value() source = source and WeakRefCallSource(source) referent_vt = VariableTracker.build(tx, referent, source) options["source"] = source return WeakRefVariable(referent_vt, callback_vt, **options) def __init__(self, referent_vt, callback_vt, **options): super().__init__(**options) self.referent_vt = referent_vt self.callback_vt = callback_vt def call_function( self, tx: "InstructionTranslator", args: "list[VariableTracker]", kwargs: "dict[str, VariableTracker]", ) -> "VariableTracker": return self.referent_vt def reconstruct(self, codegen: "PyCodegen"): codegen.add_push_null(lambda: codegen.load_import_from("weakref", "ref")) codegen(self.referent_vt) codegen(self.callback_vt) codegen.extend_output(create_call_function(2, False))
WeakRefVariable
python
kubernetes-client__python
kubernetes/client/models/v1_pod_failure_policy_on_pod_conditions_pattern.py
{ "start": 383, "end": 5211 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'status': 'str', 'type': 'str' } attribute_map = { 'status': 'status', 'type': 'type' } def __init__(self, status=None, type=None, local_vars_configuration=None): # noqa: E501 """V1PodFailurePolicyOnPodConditionsPattern - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._status = None self._type = None self.discriminator = None self.status = status self.type = type @property def status(self): """Gets the status of this V1PodFailurePolicyOnPodConditionsPattern. # noqa: E501 Specifies the required Pod condition status. To match a pod condition it is required that the specified status equals the pod condition status. Defaults to True. # noqa: E501 :return: The status of this V1PodFailurePolicyOnPodConditionsPattern. # noqa: E501 :rtype: str """ return self._status @status.setter def status(self, status): """Sets the status of this V1PodFailurePolicyOnPodConditionsPattern. Specifies the required Pod condition status. To match a pod condition it is required that the specified status equals the pod condition status. Defaults to True. # noqa: E501 :param status: The status of this V1PodFailurePolicyOnPodConditionsPattern. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501 raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501 self._status = status @property def type(self): """Gets the type of this V1PodFailurePolicyOnPodConditionsPattern. # noqa: E501 Specifies the required Pod condition type. 
To match a pod condition it is required that specified type equals the pod condition type. # noqa: E501 :return: The type of this V1PodFailurePolicyOnPodConditionsPattern. # noqa: E501 :rtype: str """ return self._type @type.setter def type(self, type): """Sets the type of this V1PodFailurePolicyOnPodConditionsPattern. Specifies the required Pod condition type. To match a pod condition it is required that specified type equals the pod condition type. # noqa: E501 :param type: The type of this V1PodFailurePolicyOnPodConditionsPattern. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501 raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501 self._type = type def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1PodFailurePolicyOnPodConditionsPattern): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1PodFailurePolicyOnPodConditionsPattern): return True return self.to_dict() != other.to_dict()
V1PodFailurePolicyOnPodConditionsPattern
python
pydantic__pydantic
pydantic-core/tests/serializers/test_model.py
{ "start": 692, "end": 2105 }
class ____(BasicModel): pass def test_model(): s = SchemaSerializer( core_schema.model_schema( BasicModel, core_schema.model_fields_schema( { 'foo': core_schema.model_field(core_schema.int_schema()), 'bar': core_schema.model_field(core_schema.bytes_schema()), } ), ) ) assert 'mode:SimpleDict' in plain_repr(s) assert 'has_extra:false' in plain_repr(s) assert s.to_python(BasicModel(foo=1, bar=b'more')) == IsStrictDict(foo=1, bar=b'more') assert s.to_python(BasicSubModel(foo=1, bar=b'more')) == IsStrictDict(foo=1, bar=b'more') assert s.to_python(BasicModel(bar=b'more', foo=1)) == IsStrictDict(bar=b'more', foo=1) assert s.to_python(BasicModel(foo=1, c=3, bar=b'more')) == IsStrictDict(foo=1, bar=b'more') assert s.to_python(BasicModel(bar=b'more', foo=1, c=3), mode='json') == IsStrictDict(bar='more', foo=1) assert s.to_python(BasicSubModel(bar=b'more', foo=1, c=3), mode='json') == IsStrictDict(bar='more', foo=1) j = s.to_json(BasicModel(bar=b'more', foo=1, c=3)) if on_pypy: assert json.loads(j) == {'bar': 'more', 'foo': 1} else: assert j == b'{"bar":"more","foo":1}' assert json.loads(s.to_json(BasicSubModel(bar=b'more', foo=1, c=3))) == {'bar': 'more', 'foo': 1} @dataclasses.dataclass
BasicSubModel
python
tiangolo__fastapi
scripts/notify_translations.py
{ "start": 2265, "end": 2327 }
class ____(BaseModel): comments: Comments
CommentsDiscussion
python
huggingface__transformers
tests/models/deepseek_v3/test_modeling_deepseek_v3.py
{ "start": 1516, "end": 7674 }
class ____: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, intermediate_size=37, moe_intermediate_size=12, num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=4, n_shared_experts=1, n_routed_experts=8, routed_scaling_factor=2.5, kv_lora_rank=16, q_lora_rank=32, qk_rope_head_dim=16, v_head_dim=32, qk_nope_head_dim=32, n_group=2, topk_group=1, num_experts_per_tok=8, first_k_dense_replace=2, norm_topk_prob=True, aux_loss_alpha=0.001, hidden_act="silu", max_position_embeddings=512, initializer_range=0.02, attention_probs_dropout_prob=0.1, type_vocab_size=16, type_sequence_label_size=2, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.moe_intermediate_size = moe_intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.n_shared_experts = n_shared_experts self.n_routed_experts = n_routed_experts self.routed_scaling_factor = routed_scaling_factor self.kv_lora_rank = kv_lora_rank self.q_lora_rank = q_lora_rank self.qk_rope_head_dim = qk_rope_head_dim self.v_head_dim = v_head_dim self.qk_nope_head_dim = qk_nope_head_dim self.n_group = n_group self.topk_group = topk_group self.num_experts_per_tok = num_experts_per_tok self.first_k_dense_replace = first_k_dense_replace self.norm_topk_prob = norm_topk_prob self.aux_loss_alpha = aux_loss_alpha self.hidden_act = hidden_act self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.attention_probs_dropout_prob = 
attention_probs_dropout_prob self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device)) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return DeepseekV3Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, intermediate_size=self.intermediate_size, moe_intermediate_size=self.moe_intermediate_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, n_shared_experts=self.n_shared_experts, n_routed_experts=self.n_routed_experts, routed_scaling_factor=self.routed_scaling_factor, kv_lora_rank=self.kv_lora_rank, q_lora_rank=self.q_lora_rank, qk_rope_head_dim=self.qk_rope_head_dim, v_head_dim=self.v_head_dim, qk_nope_head_dim=self.qk_nope_head_dim, n_group=self.n_group, topk_group=self.topk_group, num_experts_per_tok=self.num_experts_per_tok, first_k_dense_replace=self.first_k_dense_replace, norm_topk_prob=self.norm_topk_prob, aux_loss_alpha=self.aux_loss_alpha, hidden_act=self.hidden_act, max_position_embeddings=self.max_position_embeddings, 
initializer_range=self.initializer_range, use_cache=True, pad_token_id=self.pad_token_id, attention_dropout=self.attention_probs_dropout_prob, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = DeepseekV3Model(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch
DeepseekV3ModelTester
python
openai__openai-python
src/openai/resources/realtime/calls.py
{ "start": 31293, "end": 31936 }
class ____: def __init__(self, calls: AsyncCalls) -> None: self._calls = calls self.create = _legacy_response.async_to_raw_response_wrapper( calls.create, ) self.accept = _legacy_response.async_to_raw_response_wrapper( calls.accept, ) self.hangup = _legacy_response.async_to_raw_response_wrapper( calls.hangup, ) self.refer = _legacy_response.async_to_raw_response_wrapper( calls.refer, ) self.reject = _legacy_response.async_to_raw_response_wrapper( calls.reject, )
AsyncCallsWithRawResponse
python
getsentry__sentry
src/sentry/migrations/0959_add_has_logs_bit_to_project_model.py
{ "start": 170, "end": 2893 }
class ____(CheckedMigration): # This flag is used to mark that a migration shouldn't be automatically run in production. # This should only be used for operations where it's safe to run the migration after your # code has deployed. So this should not be used for most operations that alter the schema # of a table. # Here are some things that make sense to mark as post deployment: # - Large data migrations. Typically we want these to be run manually so that they can be # monitored and not block the deploy for a long period of time while they run. # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to # run this outside deployments so that we don't block them. Note that while adding an index # is a schema change, it's completely safe to run the operation after the code has deployed. # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment is_post_deployment = False dependencies = [ ("sentry", "0958_base_option_json_field"), ] operations = [ migrations.AlterField( model_name="project", name="flags", field=bitfield.models.BitField( [ "has_releases", "has_issue_alerts_targeting", "has_transactions", "has_alert_filters", "has_sessions", "has_profiles", "has_replays", "has_feedbacks", "has_new_feedbacks", "spike_protection_error_currently_active", "spike_protection_transaction_currently_active", "spike_protection_attachment_currently_active", "has_minified_stack_trace", "has_cron_monitors", "has_cron_checkins", "has_sourcemaps", "has_custom_metrics", "has_high_priority_alerts", "has_insights_http", "has_insights_db", "has_insights_assets", "has_insights_app_start", "has_insights_screen_load", "has_insights_vitals", "has_insights_caches", "has_insights_queues", "has_insights_llm_monitoring", "has_flags", "has_insights_agent_monitoring", "has_insights_mcp", "has_logs", ], default=10, ), ), ]
Migration
python
zarr-developers__zarr-python
src/zarr/core/dtype/npy/structured.py
{ "start": 803, "end": 1645 }
class ____(DTypeConfig_V2[StructuredName_V2, None]): """ A wrapper around the JSON representation of the ``Structured`` data type in Zarr V2. The ``name`` field is a sequence of sequences, where each inner sequence has two values: the field name and the data type name for that field (which could be another sequence). The data type names are strings, and the object codec ID is always None. References ---------- The structure of the ``name`` field is defined in the Zarr V2 [specification document](https://github.com/zarr-developers/zarr-specs/blob/main/docs/v2/v2.0.rst#data-type-encoding). Examples -------- ```python { "name": [ ["f0", "<m8[10s]"], ["f1", "<m8[10s]"], ], "object_codec_id": None } ``` """
StructuredJSON_V2
python
scipy__scipy
scipy/signal/_short_time_fft.py
{ "start": 10367, "end": 101661 }
class ____: r"""Provide a parametrized discrete Short-time Fourier transform (stft) and its inverse (istft). .. currentmodule:: scipy.signal.ShortTimeFFT The `~ShortTimeFFT.stft` calculates sequential FFTs by sliding a window (`win`) over an input signal by `hop` increments. It can be used to quantify the change of the spectrum over time. The `~ShortTimeFFT.stft` is represented by a complex-valued matrix S[q,p] where the p-th column represents an FFT with the window centered at the time t[p] = p * `delta_t` = p * `hop` * `T` where `T` is the sampling interval of the input signal. The q-th row represents the values at the frequency f[q] = q * `delta_f` with `delta_f` = 1 / (`mfft` * `T`) being the bin width of the FFT. The inverse STFT `~ShortTimeFFT.istft` is calculated by reversing the steps of the STFT: Take the IFFT of the p-th slice of S[q,p] and multiply the result with the so-called dual window (see `dual_win`). Shift the result by p * `delta_t` and add the result to previous shifted results to reconstruct the signal. If only the dual window is known and the STFT is invertible, `from_dual` can be used to instantiate this class. By default, the so-called canonical dual window is used. It is the window with minimal energy among all possible dual windows. `from_win_equals_dual` and `~scipy.signal.closest_STFT_dual_window` provide means for utilizing alterantive dual windows. Note that `win` is also always a dual window of `dual_win`. Due to the convention of time t = 0 being at the first sample of the input signal, the STFT values typically have negative time slots. Hence, negative indexes like `p_min` or `k_min` do not indicate counting backwards from an array's end like in standard Python indexing but being left of t = 0. More detailed information can be found in the :ref:`tutorial_stft` section of the :ref:`user_guide`. Note that all parameters of the initializer, except `scale_to` (which uses `scaling`) have identical named attributes. 
Parameters ---------- win : np.ndarray The window must be a real- or complex-valued 1d array. hop : int The increment in samples, by which the window is shifted in each step. fs : float Sampling frequency of input signal and window. Its relation to the sampling interval `T` is ``T = 1 / fs``. fft_mode : 'twosided', 'centered', 'onesided', 'onesided2X' Mode of FFT to be used (default 'onesided'). See property `fft_mode` for details. mfft: int | None Length of the FFT used, if a zero padded FFT is desired. If ``None`` (default), the length of the window `win` is used. dual_win : np.ndarray | None The dual window of `win`. If set to ``None``, it is calculated if needed. scale_to : 'magnitude', 'psd' | None If not ``None`` (default) the window function is scaled, so each STFT column represents either a 'magnitude' or a power spectral density ('psd') spectrum. This parameter sets the property `scaling` to the same value. See method `scale_to` for details. phase_shift : int | None If set, add a linear phase `phase_shift` / `mfft` * `f` to each frequency `f`. The default value of 0 ensures that there is no phase shift on the zeroth slice (in which t=0 is centered). See property `phase_shift` for more details. Notes ----- A typical STFT application is the creation of various types of time-frequency plots, often subsumed under the term "spectrogram". Note that this term is also used to explecitly refer to the absolute square of a STFT [11]_, as done in :meth:`spectrogram`. The STFT can also be used for filtering and filter banks as discussed in [12]_. References ---------- .. [11] Karlheinz Gröchenig: "Foundations of Time-Frequency Analysis", Birkhäuser Boston 2001, `10.1007/978-1-4612-0003-1` .. [12] Julius O. 
Smith III, "Spectral Audio Signal Processing", online book, 2011, https://www.dsprelated.com/freebooks/sasp/ Examples -------- The following example shows the magnitude of the STFT of a sine with varying frequency :math:`f_i(t)` (marked by a red dashed line in the plot): >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.signal import ShortTimeFFT >>> from scipy.signal.windows import gaussian ... >>> T_x, N = 1 / 20, 1000 # 20 Hz sampling rate for 50 s signal >>> t_x = np.arange(N) * T_x # time indexes for signal >>> f_i = 1 * np.arctan((t_x - t_x[N // 2]) / 2) + 5 # varying frequency >>> x = np.sin(2*np.pi*np.cumsum(f_i)*T_x) # the signal The utilized Gaussian window is 50 samples or 2.5 s long. The parameter ``mfft=200`` in `ShortTimeFFT` causes the spectrum to be oversampled by a factor of 4: >>> g_std = 8 # standard deviation for Gaussian window in samples >>> w = gaussian(50, std=g_std, sym=True) # symmetric Gaussian window >>> SFT = ShortTimeFFT(w, hop=10, fs=1/T_x, mfft=200, scale_to='magnitude') >>> Sx = SFT.stft(x) # perform the STFT In the plot, the time extent of the signal `x` is marked by vertical dashed lines. Note that the SFT produces values outside the time range of `x`. The shaded areas on the left and the right indicate border effects caused by the window slices in that area not fully being inside time range of `x`: >>> fig1, ax1 = plt.subplots(figsize=(6., 4.)) # enlarge plot a bit >>> t_lo, t_hi = SFT.extent(N)[:2] # time range of plot >>> ax1.set_title(rf"STFT ({SFT.m_num*SFT.T:g}$\,s$ Gaussian window, " + ... rf"$\sigma_t={g_std*SFT.T}\,$s)") >>> ax1.set(xlabel=f"Time $t$ in seconds ({SFT.p_num(N)} slices, " + ... rf"$\Delta t = {SFT.delta_t:g}\,$s)", ... ylabel=f"Freq. $f$ in Hz ({SFT.f_pts} bins, " + ... rf"$\Delta f = {SFT.delta_f:g}\,$Hz)", ... xlim=(t_lo, t_hi)) ... >>> im1 = ax1.imshow(abs(Sx), origin='lower', aspect='auto', ... 
extent=SFT.extent(N), cmap='viridis') >>> ax1.plot(t_x, f_i, 'r--', alpha=.5, label='$f_i(t)$') >>> fig1.colorbar(im1, label="Magnitude $|S_x(t, f)|$") ... >>> # Shade areas where window slices stick out to the side: >>> for t0_, t1_ in [(t_lo, SFT.lower_border_end[0] * SFT.T), ... (SFT.upper_border_begin(N)[0] * SFT.T, t_hi)]: ... ax1.axvspan(t0_, t1_, color='w', linewidth=0, alpha=.2) >>> for t_ in [0, N * SFT.T]: # mark signal borders with vertical line: ... ax1.axvline(t_, color='y', linestyle='--', alpha=0.5) >>> ax1.legend() >>> fig1.tight_layout() >>> plt.show() Reconstructing the signal with the `~ShortTimeFFT.istft` is straightforward, but note that the length of `x1` should be specified, since the STFT length increases in `hop` steps: >>> SFT.invertible # check if invertible True >>> x1 = SFT.istft(Sx, k1=N) >>> np.allclose(x, x1) True It is possible to calculate the STFT of signal parts: >>> N2 = SFT.nearest_k_p(N // 2) >>> Sx0 = SFT.stft(x[:N2]) >>> Sx1 = SFT.stft(x[N2:]) When assembling sequential STFT parts together, the overlap needs to be considered: >>> p0_ub = SFT.upper_border_begin(N2)[1] - SFT.p_min >>> p1_le = SFT.lower_border_end[1] - SFT.p_min >>> Sx01 = np.hstack((Sx0[:, :p0_ub], ... Sx0[:, p0_ub:] + Sx1[:, :p1_le], ... 
Sx1[:, p1_le:])) >>> np.allclose(Sx01, Sx) # Compare with SFT of complete signal True It is also possible to calculate the `itsft` for signal parts: >>> y_p = SFT.istft(Sx, N//3, N//2) >>> np.allclose(y_p, x[N//3:N//2]) True """ # immutable attributes (only have getters but no setters): _win: np.ndarray # window _dual_win: np.ndarray | None = None # canonical dual window _hop: int # Step of STFT in number of samples # mutable attributes: _fs: float # sampling frequency of input signal and window _fft_mode: FFT_MODE_TYPE = 'onesided' # Mode of FFT to use _mfft: int # length of FFT used - defaults to len(win) _scaling: Literal['magnitude', 'psd', 'unitary'] | None = None # Scaling of _win _phase_shift: int | None # amount to shift phase of FFT in samples # attributes for caching calculated values: _fac_mag: float | None = None _fac_psd: float | None = None _lower_border_end: tuple[int, int] | None = None # The following tuples store parameter(s) and return value(s) of methods for caching # (initialized with invalid parameters; should only be accessed by atomic # read/writes to alleviate potential multithreading issues): _cache_post_padding: tuple[int, tuple[int, int]] = -1, (0, 0) _cache_upper_border_begin: tuple[int, tuple[int, int]] = -1, (0, 0) _cache_t: tuple[tuple[int, int | None, int | None, int, float], np.ndarray] = \ (-1, None, None, 0, 0.), np.ndarray([]) _cache_f: tuple[tuple[FFT_MODE_TYPE, int, float], np.ndarray] = \ ('onesided', -1, 1.), np.ndarray([]) # generic type compatibility with scipy-stubs __class_getitem__ = classmethod(GenericAlias) def __init__(self, win: np.ndarray, hop: int, fs: float, *, fft_mode: FFT_MODE_TYPE = 'onesided', mfft: int | None = None, dual_win: np.ndarray | None = None, scale_to: Literal['magnitude', 'psd'] | None = None, phase_shift: int | None = 0): if not (win.ndim == 1 and win.size > 0): raise ValueError(f"Parameter win must be 1d, but {win.shape=}!") if not all(np.isfinite(win)): raise ValueError("Parameter win must 
have finite entries!") if not (hop >= 1 and isinstance(hop, int | np.integer)): raise ValueError(f"Parameter {hop=} is not an integer >= 1!") self._win, self._hop, self.fs = win, hop, fs self.win.setflags(write=False) self.mfft = len(win) if mfft is None else mfft if dual_win is not None: if dual_win.shape != win.shape: raise ValueError(f"{dual_win.shape=} must equal {win.shape=}!") if not all(np.isfinite(dual_win)): raise ValueError("Parameter dual_win must be a finite array!") dual_win.setflags(write=False) self._dual_win = dual_win # needs to be set before scaling if scale_to is not None: # needs to be set before fft_mode self.scale_to(scale_to) self.fft_mode, self.phase_shift = fft_mode, phase_shift @classmethod def from_dual(cls, dual_win: np.ndarray, hop: int, fs: float, *, fft_mode: FFT_MODE_TYPE = 'onesided', mfft: int | None = None, scale_to: Literal['magnitude', 'psd'] | None = None, phase_shift: int | None = 0): r"""Instantiate a `ShortTimeFFT` by only providing a dual window. If an STFT is invertible, it is possible to calculate the window `win` from a given dual window `dual_win`. All other parameters have the same meaning as in the initializer of `ShortTimeFFT`. As explained in the :ref:`tutorial_stft` section of the :ref:`user_guide`, an invertible STFT can be interpreted as series expansion of time-shifted and frequency modulated dual windows. E.g., the series coefficient S[q,p] belongs to the term, which shifted `dual_win` by p * `delta_t` and multiplied it by exp( 2 * j * pi * t * q * `delta_f`). Examples -------- The following example discusses decomposing a signal into time- and frequency-shifted Gaussians. A Gaussian with standard deviation of one made up of 51 samples will be used: >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from scipy.signal import ShortTimeFFT >>> from scipy.signal.windows import gaussian ... 
>>> T, N = 0.1, 51 >>> d_win = gaussian(N, std=1/T, sym=True) # symmetric Gaussian window >>> t = T * (np.arange(N) - N//2) ... >>> fg1, ax1 = plt.subplots() >>> ax1.set_title(r"Dual Window: Gaussian with $\sigma_t=1$") >>> ax1.set(xlabel=f"Time $t$ in seconds ({N} samples, $T={T}$ s)", ... xlim=(t[0], t[-1]), ylim=(0, 1.1*np.max(d_win))) >>> ax1.plot(t, d_win, 'C0-') The following plot with the overlap of 41, 11 and 2 samples show how the `hop` interval affects the shape of the window `win`: >>> fig2, axx = plt.subplots(3, 1, sharex='all') ... >>> axx[0].set_title(r"Windows for hop$\in\{10, 40, 49\}$") >>> for c_, h_ in enumerate([10, 40, 49]): ... SFT = ShortTimeFFT.from_dual(d_win, h_, 1/T) ... axx[c_].plot(t + h_ * T, SFT.win, 'k--', alpha=.3, label=None) ... axx[c_].plot(t - h_ * T, SFT.win, 'k:', alpha=.3, label=None) ... axx[c_].plot(t, SFT.win, f'C{c_+1}', ... label=r"$\Delta t=%0.1f\,$s" % SFT.delta_t) ... axx[c_].set_ylim(0, 1.1*max(SFT.win)) ... axx[c_].legend(loc='center') >>> axx[-1].set(xlabel=f"Time $t$ in seconds ({N} samples, $T={T}$ s)", ... xlim=(t[0], t[-1])) >>> plt.show() Beside the window `win` centered at t = 0 the previous (t = -`delta_t`) and following window (t = `delta_t`) are depicted. It can be seen that for small `hop` intervals, the window is compact and smooth, having a good time-frequency concentration in the STFT. For the large `hop` interval of 4.9 s, the window has small values around t = 0, which are not covered by the overlap of the adjacent windows, which could lead to numeric inaccuracies. Furthermore, the peaky shape at the beginning and the end of the window points to a higher bandwidth, resulting in a poorer time-frequency resolution of the STFT. Hence, the choice of the `hop` interval will be a compromise between a time-frequency resolution and memory requirements demanded by small `hop` sizes. See Also -------- from_window: Create instance by wrapping `get_window`. 
ShortTimeFFT: Create instance using standard initializer. """ win = _calc_dual_canonical_window(dual_win, hop) return cls(win=win, hop=hop, fs=fs, fft_mode=fft_mode, mfft=mfft, dual_win=dual_win, scale_to=scale_to, phase_shift=phase_shift) @classmethod def from_window(cls, win_param: str | tuple | float, fs: float, nperseg: int, noverlap: int, *, symmetric_win: bool = False, fft_mode: FFT_MODE_TYPE = 'onesided', mfft: int | None = None, scale_to: Literal['magnitude', 'psd'] | None = None, phase_shift: int | None = 0): """Instantiate `ShortTimeFFT` by using `get_window`. The method `get_window` is used to create a window of length `nperseg`. The parameter names `noverlap`, and `nperseg` are used here, since they more inline with other classical STFT libraries. Parameters ---------- win_param: Union[str, tuple, float], Parameters passed to `get_window`. For windows with no parameters, it may be a string (e.g., ``'hann'``), for parametrized windows a tuple, (e.g., ``('gaussian', 2.)``) or a single float specifying the shape parameter of a kaiser window (i.e. ``4.`` and ``('kaiser', 4.)`` are equal. See `get_window` for more details. fs : float Sampling frequency of input signal. Its relation to the sampling interval `T` is ``T = 1 / fs``. nperseg: int Window length in samples, which corresponds to the `m_num`. noverlap: int Window overlap in samples. It relates to the `hop` increment by ``hop = npsereg - noverlap``. symmetric_win: bool If ``True`` then a symmetric window is generated, else a periodic window is generated (default). Though symmetric windows seem for most applications to be more sensible, the default of a periodic windows was chosen to correspond to the default of `get_window`. This parameter is ignored, if the window name in the `window` parameter has a suffix ``'_periodic'`` or ``'_symmetric'`` appended to it (e.g., ``'hann_symmetric'``). fft_mode : 'twosided', 'centered', 'onesided', 'onesided2X' Mode of FFT to be used (default 'onesided'). 
See property `fft_mode` for details. mfft: int | None Length of the FFT used, if a zero padded FFT is desired. If ``None`` (default), the length of the window `win` is used. scale_to : 'magnitude', 'psd' | None If not ``None`` (default) the window function is scaled, so each STFT column represents either a 'magnitude' or a power spectral density ('psd') spectrum. This parameter sets the property `scaling` to the same value. See method `scale_to` for details. phase_shift : int | None If set, add a linear phase `phase_shift` / `mfft` * `f` to each frequency `f`. The default value 0 ensures that there is no phase shift on the zeroth slice (in which t=0 is centered). See property `phase_shift` for more details. Examples -------- The following instances ``SFT0`` and ``SFT1`` are equivalent: >>> from scipy.signal import ShortTimeFFT, get_window >>> nperseg = 9 # window length >>> w = get_window(('gaussian_periodic', 2.), nperseg) >>> fs = 128 # sampling frequency >>> hop = 3 # increment of STFT time slice >>> SFT0 = ShortTimeFFT(w, hop, fs=fs) >>> SFT1 = ShortTimeFFT.from_window(('gaussian', 2.), fs, nperseg, ... noverlap=nperseg-hop) See Also -------- scipy.signal.get_window: Return a window of a given length and type. from_dual: Create instance using dual window. ShortTimeFFT: Create instance using standard initializer. """ win = get_window(win_param, nperseg, fftbins=not symmetric_win) return cls(win, hop=nperseg-noverlap, fs=fs, fft_mode=fft_mode, mfft=mfft, scale_to=scale_to, phase_shift=phase_shift) @classmethod def from_win_equals_dual( cls, desired_win: np.ndarray, hop: int, fs: float, *, fft_mode: FFT_MODE_TYPE = 'onesided', mfft: int | None = None, scale_to: Literal['magnitude', 'psd', 'unitary'] | None = None, phase_shift: int | None = 0): r"""Create instance where the window and its dual are equal up to a scaling factor. 
An instance is created were window and dual window are equal as well as being closest to the parameter `desired_win` in the least-squares sense, i.e., minimizing ``abs(win-desired_win)**2``. Hence, `win` has the same length as `desired_win`. Then a scaling factor is applied accoring to the `scale_to` parameter. All other parameters have the identical meaning as in the initializer. To be able to calculate a valid window, `desired_win` needs to have a valid dual STFT window for the given `hop` interval. If this is not the case, a ``ValueError`` is raised. Parameters ---------- desired_win : np.ndarray A real-valued or complex-valued 1d array containing the sample of the desired window. hop : int The increment in samples, by which the window is shifted in each step. fs : float Sampling frequency of input signal and window. Its relation to the sampling interval `T` is ``T = 1 / fs``. fft_mode : 'twosided', 'centered', 'onesided', 'onesided2X' Mode of FFT to be used (default 'onesided'). See property `fft_mode` for details. mfft: int | None Length of the FFT used, if a zero padded FFT is desired. If ``None`` (default), the length of the window `win` is used. scale_to : 'magnitude' | 'psd' | 'unitary' | None If not ``None`` (default) the window function is scaled, so each STFT column represents either a 'magnitude' or a power spectral density ('psd') spectrum, Alternatively, the STFT can be scaled to a`unitary` mapping, i.e., dividing the window by ``np.sqrt(mfft)`` and multiplying the dual window by the same amount. phase_shift : int | None If set, add a linear phase `phase_shift` / `mfft` * `f` to each frequency `f`. The default value of 0 ensures that there is no phase shift on the zeroth slice (in which t=0 is centered). See property `phase_shift` for more details. Notes ----- The set of all possible windows with identical dual is defined by the set of linear constraints of Eq. 
:math:numref:`eq_STFT_AllDualWinsCond` in the :ref:`tutorial_stft` section of the :ref:`user_guide`. There it is also derived that ``ShortTimeFFT.dual_win == ShortTimeFFT.m_pts * ShortTimeFFT.win`` needs to hold for an STFT to be a unitary mapping. A unitary mapping preserves the value of the scalar product, i.e., .. math:: \langle x, y\rangle = \sum_k x[k]\, \overline{y[k]} \stackrel{\stackrel{\text{unitary}}{\downarrow}}{=} \sum_{q,p} S_x[q,p]\, \overline{S_y[q,p]} = \langle S_x[q,p], S_y[q,p]\rangle\ , with :math:`S_{x,y}` being the STFT of :math:`x,y`. Hence, the energy :math:`E_x=T\sum_k |x[k]|^2` of a signal is also preserved. This is also illustrated in the example below. Thie reason of distinguishing between no scaling (i.e., parameter `scale_to` is ``None``) and unitary scaling (i.e., ``scale_to = 'unitary'``) is due to the utilized FFT function not being unitary (i.e., using the default value ``'backward'`` for the `~scipy.fft.fft` parameter `norm`). See Also -------- closest_STFT_dual_window: Calculate the STFT dual window of a given window closest to a desired dual window. ShortTimeFFT.spectrogram: Calculate squared STFTs ShortTimeFFT: Class this property belongs to. Examples -------- The following example shows that an STFT can be indeed unitary: >>> import matplotlib.pyplot as plt >>> import numpy as np >>> from scipy.signal import ShortTimeFFT, windows ... >>> m, hop, std = 36, 8, 5 >>> desired_win = windows.gaussian(m, std, sym=True) >>> SFT = ShortTimeFFT.from_win_equals_dual(desired_win, hop, fs=1/m, ... fft_mode='twosided', ... 
scale_to='unitary') >>> np.allclose(SFT.dual_win, SFT.win * SFT.m_num) # check if STFT is unitary True >>> x1, x2 = np.tile([-1, -1, 1, 1], 5), np.tile([1, -1, -1, 1], 5) >>> np.sum(x1*x2) # scalar product is zero -> orthogonal signals 0 >>> np.sum(x1**2) # scalar product of x1 with itself 20 >>> Sx11, Sx12 = SFT.spectrogram(x1), SFT.spectrogram(x1, x2) >>> np.sum(Sx12) # STFT scalar product is also zero -4.163336342344337e-16+0j # may vary >>> np.sum(Sx11) # == np.sum(x1**2) 19.999999999999996 # may vary ... ... # Do the plotting: >>> fg1, (ax11, ax12) = plt.subplots(1, 2, tight_layout=True, figsize=(8, 4)) >>> s_fac = np.sqrt(SFT.mfft) >>> _ = fg1.suptitle(f"Scaled Unitary Window of {m} Sample Gaussian with " + ... rf"{hop=}, $\sigma={std}$, Scale factor: {s_fac}") >>> ax11.set(ylabel="Amplitude", xlabel="Samples", xlim=(0, m)) >>> ax12.set(xlabel="Frequency Bins", ylabel="Magnitude Spectrum", ... xlim=(0, 15), ylim=(1e-5, 1.5)) >>> u_win_str = rf"Unitary $\times{s_fac:g}$" >>> for x_, n_ in zip((desired_win, SFT.win*s_fac), ('Desired', u_win_str)): ... ax11.plot(x_, '.-', alpha=0.5, label=n_) ... X_ = np.fft.rfft(x_) / np.sum(abs(x_)) ... ax12.semilogy(abs(X_), '.-', alpha=0.5, label=n_) >>> for ax_ in (ax11, ax12): ... ax_.grid(True) ... ax_.legend() >>> plt.show() Note that ``fftmode='twosided'`` is used, since we need sum over the complete time frequency plane. Due to passing ``scale_to='unitary'`` the window ``SFT.win`` is scaled by ``1/np.sqrt(SFT.mfft)``. Hence, ``SFT.win`` needs to be scaled by `s_fac` in the plot above. 
""" if not (desired_win.ndim == 1 and desired_win.size > 0): raise ValueError(f"Parameter desired_win is not 1d, but " f"{desired_win.shape=}!") if issubclass(desired_win.dtype.type, np.integer): raise ValueError("Parameter desired_win cannot be of integer type, " + f"but {desired_win.dtype=} => cast to float | complex ") if not all(np.isfinite(desired_win)): raise ValueError("Parameter desired_win must have finite entries!") if not (1 <= hop <= len(desired_win) and isinstance(hop, int | np.integer)): raise ValueError(f"Parameter {hop=} is not an integer between 1 and " + f"{len(desired_win)=}!") if scale_to not in ['magnitude', 'psd', 'unitary', None]: raise ValueError(f"Parameter {scale_to=} not in " + "['magnitude', 'psd', 'unitary', None]!") mfft = len(desired_win) if mfft is None else mfft s_fac = np.sqrt(mfft) if scale_to == 'unitary' else 1 win = desired_win.copy() # we do not want to modify input parameters relative_resolution = np.finfo(win.dtype).resolution * max(win) for m in range(hop): a = np.linalg.norm(desired_win[m::hop]) if not (a > relative_resolution): raise ValueError("Parameter desired_win does not have valid STFT dual " f"window for {hop=}!") win[m::hop] /= a SFT = cls(win=win/s_fac, hop=hop, fs=fs, fft_mode=fft_mode, mfft=mfft, dual_win=win*s_fac, phase_shift=phase_shift, scale_to=None if scale_to=='unitary' else scale_to) if scale_to == 'unitary': SFT._scaling = scale_to return SFT @property def win(self) -> np.ndarray: """Window function as real- or complex-valued 1d array. This attribute is read-only, since `dual_win` depends on it. To make this array immutable, its WRITEABLE flag is set to ``FALSE``. See Also -------- dual_win: Dual window. m_num: Number of samples in window `win`. m_num_mid: Center index of window `win`. mfft: Length of input for the FFT used - may be larger than `m_num`. hop: ime increment in signal samples for sliding window. win: Window function as real- or complex-valued 1d array. 
numpy.ndarray.setflags: Modify array flags. ShortTimeFFT: Class this property belongs to. """ return self._win @property def hop(self) -> int: """Time increment in signal samples for sliding window. This attribute is read only, since `dual_win` depends on it. See Also -------- delta_t: Time increment of STFT (``hop*T``) m_num: Number of samples in window `win`. m_num_mid: Center index of window `win`. mfft: Length of input for the FFT used - may be larger than `m_num`. T: Sampling interval of input signal and of the window. win: Window function as real- or complex-valued 1d array. ShortTimeFFT: Class this property belongs to. """ return self._hop @property def T(self) -> float: """Sampling interval of input signal and of the window. A ``ValueError`` is raised if it is set to a non-positive value. See Also -------- delta_t: Time increment of STFT (``hop*T``) hop: Time increment in signal samples for sliding window. fs: Sampling frequency (being ``1/T``) t: Times of STFT for an input signal with `n` samples. ShortTimeFFT: Class this property belongs to. """ return 1 / self._fs @T.setter def T(self, v: float): """Sampling interval of input signal and of the window. A ``ValueError`` is raised if it is set to a non-positive value. """ if not (v > 0): raise ValueError(f"Sampling interval T={v} must be positive!") self._fs = 1 / v @property def fs(self) -> float: """Sampling frequency of input signal and of the window. The sampling frequency is the inverse of the sampling interval `T`. A ``ValueError`` is raised if it is set to a non-positive value. See Also -------- delta_t: Time increment of STFT (``hop*T``) hop: Time increment in signal samples for sliding window. T: Sampling interval of input signal and of the window (``1/fs``). ShortTimeFFT: Class this property belongs to. """ return self._fs @fs.setter def fs(self, v: float): """Sampling frequency of input signal and of the window. The sampling frequency is the inverse of the sampling interval `T`. 
A ``ValueError`` is raised if it is set to a non-positive value. """ if not (v > 0): raise ValueError(f"Sampling frequency fs={v} must be positive!") self._fs = v @property def fft_mode(self) -> FFT_MODE_TYPE: """Mode of utilized FFT ('twosided', 'centered', 'onesided' or 'onesided2X'). It can have the following values: 'twosided': Two-sided FFT, where values for the negative frequencies are in upper half of the array. Corresponds to :func:`~scipy.fft.fft()`. 'centered': Two-sided FFT with the values being ordered along monotonically increasing frequencies. Corresponds to applying :func:`~scipy.fft.fftshift()` to :func:`~scipy.fft.fft()`. 'onesided': Calculates only values for non-negative frequency values. Corresponds to :func:`~scipy.fft.rfft()`. 'onesided2X': Like `onesided`, but the non-zero frequencies are doubled if `scaling` is set to 'magnitude' or multiplied by ``sqrt(2)`` if set to 'psd'. If `scaling` is ``None``, setting `fft_mode` to `onesided2X` is not allowed. If the FFT length `mfft` is even, the last FFT value is not paired, and thus it is not scaled. Note that `onesided` and `onesided2X` do not work for complex-valued signals or complex-valued windows. Furthermore, the frequency values can be obtained by reading the `f` property, and the number of samples by accessing the `f_pts` property. See Also -------- delta_f: Width of the frequency bins of the STFT. f: Frequencies values of the STFT. f_pts: Width of the frequency bins of the STFT. onesided_fft: True if a one-sided FFT is used. scaling: Normalization applied to the window function ShortTimeFFT: Class this property belongs to. """ return self._fft_mode @fft_mode.setter def fft_mode(self, t: FFT_MODE_TYPE): """Set mode of FFT. Allowed values are 'twosided', 'centered', 'onesided', 'onesided2X'. See the property `fft_mode` for more details. 
""" if t not in (fft_mode_types := get_args(FFT_MODE_TYPE)): raise ValueError(f"fft_mode='{t}' not in {fft_mode_types}!") if t in {'onesided', 'onesided2X'} and np.iscomplexobj(self.win): raise ValueError(f"One-sided spectra, i.e., fft_mode='{t}', " + "are not allowed for complex-valued windows!") if t == 'onesided2X' and self.scaling is None: raise ValueError(f"For scaling is None, fft_mode='{t}' is invalid!" "Do scale_to('psd') or scale_to('magnitude')!") self._fft_mode = t @property def mfft(self) -> int: """Length of input for the FFT used - may be larger than window length `m_num`. If not set, `mfft` defaults to the window length `m_num`. See Also -------- f_pts: Number of points along the frequency axis. f: Frequencies values of the STFT. m_num: Number of samples in window `win`. ShortTimeFFT: Class this property belongs to. """ return self._mfft @mfft.setter def mfft(self, n_: int): """Setter for the length of FFT utilized. See the property `mfft` for further details. """ if not (n_ >= self.m_num): raise ValueError(f"Attribute mfft={n_} needs to be at least the " + f"window length m_num={self.m_num}!") self._mfft = n_ @property def scaling(self) -> Literal['magnitude', 'psd', 'unitary'] | None: """Normalization applied to the window function ('magnitude', 'psd', 'unitary', or ``None``). If not ``None``, the FFT slices may be either interpreted as a `magnitude` or a power spectral density spectrum (`psd`). If set to `unitary`, the STFT may be interpreted as a unitary mapping, i.e., preserving the value of the scalar product. The window function can be scaled by calling the `scale_to` method, or it is set by the initializer parameter ``scale_to``. Note that a window cannot to be scaled to be `unitary`. Use `from_win_equals_dual` to create a unitary `ShortTimeFFT` instance. See Also -------- fac_magnitude: Scaling factor for to a magnitude spectrum. fac_psd: Scaling factor for to a power spectral density spectrum. 
fft_mode: Mode of utilized FFT scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. from_win_equals_dual: Class-method for creating a unitary instance. ShortTimeFFT: Class this property belongs to. """ return self._scaling def scale_to(self, scaling: Literal['magnitude', 'psd']): """Scale window to obtain 'magnitude' or 'psd' scaling for the STFT. The window of a 'magnitude' spectrum has an integral of one, i.e., unit area for non-negative windows. This ensures that absolute the values of spectrum does not change if the length of the window changes (given the input signal is stationary). To represent the power spectral density ('psd') for varying length windows the area of the absolute square of the window needs to be unity. The `scaling` property shows the current scaling. The properties `fac_magnitude` and `fac_psd` show the scaling factors required to scale the STFT values to a magnitude or a psd spectrum. Note that a window cannot to be scaled to be `unitary`. Use `from_win_equals_dual` to create a unitary `ShortTimeFFT` instance. This method is called, if the initializer parameter `scale_to` is set. See Also -------- fac_magnitude: Scaling factor for to a magnitude spectrum. fac_psd: Scaling factor for to a power spectral density spectrum. fft_mode: Mode of utilized FFT scaling: Normalization applied to the window function. ShortTimeFFT: Class this method belongs to. 
""" if scaling not in (scaling_values := {'magnitude', 'psd'}): raise ValueError(f"{scaling=} not in {scaling_values}!") if self._scaling == scaling: # do nothing return s_fac = self.fac_psd if scaling == 'psd' else self.fac_magnitude self._win = self._win * s_fac self.win.setflags(write=False) if self._dual_win is not None: self._dual_win = self._dual_win / s_fac self.dual_win.setflags(write=False) self._fac_mag, self._fac_psd = None, None # reset scaling factors self._scaling = scaling @property def phase_shift(self) -> int | None: """If set, add linear phase `phase_shift` / `mfft` * `f` to each FFT slice of frequency `f`. Shifting (more precisely `rolling`) an `mfft`-point FFT input by `phase_shift` samples results in a multiplication of the output by ``np.exp(2j*np.pi*q*phase_shift/mfft)`` at the frequency q * `delta_f`. The default value 0 ensures that there is no phase shift on the zeroth slice (in which t=0 is centered). No phase shift (``phase_shift is None``) is equivalent to ``phase_shift = -mfft//2``. In this case slices are not shifted before calculating the FFT. The absolute value of `phase_shift` is limited to be less than `mfft`. See Also -------- delta_f: Width of the frequency bins of the STFT. f: Frequencies values of the STFT. mfft: Length of input for the FFT used ShortTimeFFT: Class this property belongs to. """ return self._phase_shift @phase_shift.setter def phase_shift(self, v: int | None): """The absolute value of the phase shift needs to be less than mfft samples. See the `phase_shift` getter method for more details. """ if v is None: self._phase_shift = v return if not isinstance(v, int | np.integer): raise ValueError(f"phase_shift={v} has the unit samples. 
Hence " + "it needs to be an int or it may be None!") if not (-self.mfft < v < self.mfft): raise ValueError("-mfft < phase_shift < mfft does not hold " + f"for mfft={self.mfft}, phase_shift={v}!") self._phase_shift = v def _x_slices(self, x: np.ndarray, k_off: int, p0: int, p1: int, padding: PAD_TYPE) -> Generator[np.ndarray, None, None]: """Generate signal slices along last axis of `x`. This method is only used by `stft_detrend`. The parameters are described in `~ShortTimeFFT.stft`. """ if padding not in (padding_types := get_args(PAD_TYPE)): raise ValueError(f"Parameter {padding=} not in {padding_types}!") pad_kws: dict[str, dict] = { # possible keywords to pass to np.pad: 'zeros': dict(mode='constant', constant_values=(0, 0)), 'edge': dict(mode='edge'), 'even': dict(mode='reflect', reflect_type='even'), 'odd': dict(mode='reflect', reflect_type='odd'), } # typing of pad_kws is needed to make mypy happy n, n1 = x.shape[-1], (p1 - p0) * self.hop k0 = p0 * self.hop - self.m_num_mid + k_off # start sample k1 = k0 + n1 + self.m_num # end sample i0, i1 = max(k0, 0), min(k1, n) # indexes to shorten x # dimensions for padding x: pad_width = [(0, 0)] * (x.ndim-1) + [(-min(k0, 0), max(k1 - n, 0))] x1 = np.pad(x[..., i0:i1], pad_width, **pad_kws[padding]) for k_ in range(0, n1, self.hop): yield x1[..., k_:k_ + self.m_num] def stft(self, x: np.ndarray, p0: int | None = None, p1: int | None = None, *, k_offset: int = 0, padding: PAD_TYPE = 'zeros', axis: int = -1) \ -> np.ndarray: """Perform the short-time Fourier transform. A two-dimensional matrix with ``p1-p0`` columns is calculated. The `f_pts` rows represent value at the frequencies `f`. The q-th column of the windowed FFT with the window `win` is centered at t[q]. The columns represent the values at the frequencies `f`. Parameters ---------- x : np.ndarray The input signal as real or complex valued array. For complex values, the property `fft_mode` must be set to 'twosided' or 'centered'. 
p0 : int | None The first element of the range of slices to calculate. If ``None`` then it is set to :attr:`p_min`, which is the smallest possible slice. p1 : int | None The end of the array. If ``None`` then `p_max(n)` is used. k_offset : int Index of first sample (t = 0) in `x`. padding : 'zeros' | 'edge' | 'even' | 'odd' Kind of values which are added, when the sliding window sticks out on either the lower or upper end of the input `x`. Zeros are added if the default 'zeros' is set. For 'edge' either the first or the last value of `x` is used. 'even' pads by reflecting the signal on the first or last sample and 'odd' additionally multiplies it with -1. axis : int The axis of `x` over which to compute the STFT. If not given, the last axis is used. Returns ------- S : np.ndarray A complex array is returned with the dimension always being larger by one than of `x`. The last axis always represents the time slices of the STFT. `axis` defines the frequency axis (default second to last). E.g., for a one-dimensional `x`, a complex 2d array is returned, with axis 0 representing frequency and axis 1 the time slices. See Also -------- delta_f: Width of the frequency bins of the STFT. delta_t: Time increment of STFT f: Frequencies values of the STFT. invertible: Check if STFT is invertible. :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform. p_range: Determine and validate slice index range. stft_detrend: STFT with detrended segments. t: Times of STFT for an input signal with `n` samples. :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. 
""" return self.stft_detrend(x, None, p0, p1, k_offset=k_offset, padding=padding, axis=axis) def stft_detrend(self, x: np.ndarray, detr: Callable[[np.ndarray], np.ndarray] | Literal['linear', 'constant'] | None, # noqa: E501 p0: int | None = None, p1: int | None = None, *, k_offset: int = 0, padding: PAD_TYPE = 'zeros', axis: int = -1) \ -> np.ndarray: """Calculate short-time Fourier transform with a trend being subtracted from each segment beforehand. When the parameter `detr` is ``None``, this method's behavior is identical to the `~ShortTimeFFT.stft` method. Note that due to the detrending, the original signal cannot be reconstructed by the `~ShortTimeFFT.istft`. Parameters ---------- x : np.ndarray The input signal as real or complex valued array. For complex values, the property `fft_mode` must be set to 'twosided' or 'centered'. detr : 'linear' | 'constant' | Callable[[np.ndarray], np.ndarray] | None If 'constant', the mean is subtracted, if set to "linear", the linear trend is removed from each segment. This is achieved by calling `~scipy.signal.detrend`. If `detr` is a function with one parameter, `detr` is applied to each segment. p0 : int | None The first element of the range of slices to calculate. If ``None`` then it is set to :attr:`p_min`, which is the smallest possible slice. p1 : int | None The end of the array. If ``None`` then `p_max(n)` is used. k_offset : int Index of first sample (t = 0) in `x`. padding : 'zeros' | 'edge' | 'even' | 'odd' Kind of values which are added, when the sliding window sticks out on either the lower or upper end of the input `x`. Zeros are added if the default 'zeros' is set. For 'edge' either the first or the last value of `x` is used. 'even' pads by reflecting the signal on the first or last sample and 'odd' additionally multiplies it with -1. axis: int The axis of `x` over which to compute the STFT. If not given, the last axis is used. 
Returns ------- S : np.ndarray A complex array is returned with the dimension always being larger by one than of `x`. The last axis always represents the time slices of the STFT. `axis` defines the frequency axis (default second to last). E.g., for a one-dimensional `x`, a complex 2d array is returned, with axis 0 representing frequency and axis 1 the time slices. See Also -------- invertible: Check if STFT is invertible. :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform. :meth:`~ShortTimeFFT.stft`: Short-time Fourier transform (without detrending). :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. """ if self.onesided_fft and np.iscomplexobj(x): raise ValueError(f"Complex-valued `x` not allowed for {self.fft_mode=}'! " "Set property `fft_mode` to 'twosided' or 'centered'.") if isinstance(detr, str): detr = partial(detrend, type=detr) elif not (detr is None or callable(detr)): raise ValueError(f"Parameter {detr=} is not a str, function or " + "None!") n = x.shape[axis] if not (n >= (m2p := self.m_num-self.m_num_mid)): e_str = f'{len(x)=}' if x.ndim == 1 else f'of {axis=} of {x.shape}' raise ValueError(f"{e_str} must be >= ceil(m_num/2) = {m2p}!") if x.ndim > 1: # motivated by the NumPy broadcasting mechanisms: x = np.moveaxis(x, axis, -1) # determine slice index range: p0, p1 = self.p_range(n, p0, p1) S_shape_1d = (self.f_pts, p1 - p0) S_shape = x.shape[:-1] + S_shape_1d if x.ndim > 1 else S_shape_1d S = np.zeros(S_shape, dtype=complex) for p_, x_ in enumerate(self._x_slices(x, k_offset, p0, p1, padding)): if detr is not None: x_ = detr(x_) S[..., :, p_] = self._fft_func(x_ * self.win.conj()) if x.ndim > 1: return np.moveaxis(S, -2, axis if axis >= 0 else axis-1) return S def spectrogram(self, x: np.ndarray, y: np.ndarray | None = None, detr: Callable[[np.ndarray], np.ndarray] | Literal['linear', 'constant'] | None = None, # noqa: E501 *, p0: int | None = None, p1: int | None = None, k_offset: int = 0, padding: PAD_TYPE = 'zeros', 
axis: int = -1) \ -> np.ndarray: r"""Calculate spectrogram or cross-spectrogram. The spectrogram is the absolute square of the STFT, i.e., it is ``abs(S[q,p])**2`` for given ``S[q,p]`` and thus is always non-negative. For two STFTs ``Sx[q,p], Sy[q,p]``, the cross-spectrogram is defined as ``Sx[q,p] * np.conj(Sy[q,p])`` and is complex-valued. This is a convenience function for calling `~ShortTimeFFT.stft` / `stft_detrend`, hence all parameters are discussed there. Parameters ---------- x : np.ndarray The input signal as real or complex valued array. For complex values, the property `fft_mode` must be set to 'twosided' or 'centered'. y : np.ndarray The second input signal of the same shape as `x`. If ``None``, it is assumed to be `x`. For complex values, the property `fft_mode` must be set to 'twosided' or 'centered'. detr : 'linear' | 'constant' | Callable[[np.ndarray], np.ndarray] | None If 'constant', the mean is subtracted, if set to "linear", the linear trend is removed from each segment. This is achieved by calling `~scipy.signal.detrend`. If `detr` is a function with one parameter, `detr` is applied to each segment. For ``None`` (default), no trends are removed. p0 : int | None The first element of the range of slices to calculate. If ``None`` then it is set to :attr:`p_min`, which is the smallest possible slice. p1 : int | None The end of the array. If ``None`` then `p_max(n)` is used. k_offset : int Index of first sample (t = 0) in `x`. padding : 'zeros' | 'edge' | 'even' | 'odd' Kind of values which are added, when the sliding window sticks out on either the lower or upper end of the input `x`. Zeros are added if the default 'zeros' is set. For 'edge' either the first or the last value of `x` is used. 'even' pads by reflecting the signal on the first or last sample and 'odd' additionally multiplies it with -1. axis : int The axis of `x` over which to compute the STFT. If not given, the last axis is used. 
Returns ------- S_xy : np.ndarray A real-valued array with non-negative values is returned, if ``x is y`` or `y` is ``None``. The dimension is always by one larger than of `x`. The last axis always represents the time slices of the spectrogram. `axis` defines the frequency axis (default second to last). E.g., for a one-dimensional `x`, a complex 2d array is returned, with axis 0 representing frequency and axis 1 the time slices. Notes ----- The cross-spectrogram may be interpreted as the time-frequency analogon of the cross-spectral density (consult `csd`). The absolute square `|Sxy|²` of a cross-spectrogram `Sxy` divided by the spectrograms `Sxx` and `Syy` can be interpreted as a coherence spectrogram ``Cxy := abs(Sxy)**2 / (Sxx*Syy)``, which is the time-frequency analogon to `~coherence`. If the STFT is parametrized to be a unitary transform, i.e., utilitzing `~from_win_equals_dual`, then the value of the scalar product, hence also the energy, is preserved. Examples -------- The following example shows the spectrogram of a square wave with varying frequency :math:`f_i(t)` (marked by a green dashed line in the plot) sampled with 20 Hz. The utilized Gaussian window is 50 samples or 2.5 s long. For the `ShortTimeFFT`, the parameter ``mfft=800`` (oversampling factor 16) and the `hop` interval of 2 in was chosen to produce a sufficient number of points. The plot's colormap is logarithmically scaled as the power spectral density is in dB. The time extent of the signal `x` is marked by vertical dashed lines, and the shaded areas mark the presence of border effects. >>> import matplotlib.pyplot as plt >>> import numpy as np >>> from scipy.signal import square, ShortTimeFFT >>> from scipy.signal.windows import gaussian ... >>> T_x, N = 1 / 20, 1000 # 20 Hz sampling rate for 50 s signal >>> t_x = np.arange(N) * T_x # time indexes for signal >>> f_i = 5e-3*(t_x - t_x[N // 3])**2 + 1 # varying frequency >>> x = square(2*np.pi*np.cumsum(f_i)*T_x) # the signal ... 
>>> g_std = 12 # standard deviation for Gaussian window in samples >>> win = gaussian(50, std=g_std, sym=True) # symmetric Gaussian wind. >>> SFT = ShortTimeFFT(win, hop=2, fs=1/T_x, mfft=800, scale_to='psd') >>> Sx2 = SFT.spectrogram(x) # calculate absolute square of STFT ... >>> fig1, ax1 = plt.subplots(figsize=(6., 4.)) # enlarge plot a bit >>> t_lo, t_hi = SFT.extent(N)[:2] # time range of plot >>> ax1.set_title(rf"Spectrogram ({SFT.m_num*SFT.T:g}$\,s$ Gaussian " + ... rf"window, $\sigma_t={g_std*SFT.T:g}\,$s)") >>> ax1.set(xlabel=f"Time $t$ in seconds ({SFT.p_num(N)} slices, " + ... rf"$\Delta t = {SFT.delta_t:g}\,$s)", ... ylabel=f"Freq. $f$ in Hz ({SFT.f_pts} bins, " + ... rf"$\Delta f = {SFT.delta_f:g}\,$Hz)", ... xlim=(t_lo, t_hi)) >>> Sx_dB = 10 * np.log10(np.fmax(Sx2, 1e-4)) # limit range to -40 dB >>> im1 = ax1.imshow(Sx_dB, origin='lower', aspect='auto', ... extent=SFT.extent(N), cmap='magma') >>> ax1.plot(t_x, f_i, 'g--', alpha=.5, label='$f_i(t)$') >>> fig1.colorbar(im1, label='Power Spectral Density ' + ... r"$20\,\log_{10}|S_x(t, f)|$ in dB") ... >>> # Shade areas where window slices stick out to the side: >>> for t0_, t1_ in [(t_lo, SFT.lower_border_end[0] * SFT.T), ... (SFT.upper_border_begin(N)[0] * SFT.T, t_hi)]: ... ax1.axvspan(t0_, t1_, color='w', linewidth=0, alpha=.3) >>> for t_ in [0, N * SFT.T]: # mark signal borders with vertical line ... ax1.axvline(t_, color='c', linestyle='--', alpha=0.5) >>> ax1.legend() >>> fig1.tight_layout() >>> plt.show() The logarithmic scaling reveals the odd harmonics of the square wave, which are reflected at the Nyquist frequency of 10 Hz. This aliasing is also the main source of the noise artifacts in the plot. See Also -------- :meth:`~ShortTimeFFT.stft`: Perform the short-time Fourier transform. stft_detrend: STFT with a trend subtracted from each segment. :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. 
""" Sx = self.stft_detrend(x, detr, p0, p1, k_offset=k_offset, padding=padding, axis=axis) if y is None or y is x: # do spectrogram: return Sx.real**2 + Sx.imag**2 # Cross-spectrogram: Sy = self.stft_detrend(y, detr, p0, p1, k_offset=k_offset, padding=padding, axis=axis) return Sx * Sy.conj() @property def dual_win(self) -> np.ndarray: """Dual window (canonical dual window by default). A STFT can be interpreted as the input signal being expressed as a weighted sum of modulated and time-shifted dual windows. If no dual window is given on instantiation, the canonical dual window, i.e., the window with the minimal energy (i.e., minimal L²-norm) is calculated. Alternative means for determining dual windows are provided by `closest_STFT_dual_window` and the `from_win_equals_dual` class-method. Note that `win` is also always a dual window of `dual_win`. `dual_win` has same length as `win`, namely `m_num` samples. If the dual window cannot be calculated a ``ValueError`` is raised. This attribute is read only and calculated lazily. To make this array immutable, its WRITEABLE flag is set to ``FALSE``. See Also -------- m_num: Number of samples in window `win` and `dual_win`. win: Window function as real- or complex-valued 1d array. from_win_equals_dual: Create instance where `win` and `dual_win` are equal. closest_STFT_dual_window: Calculate dual window closest to a desired window. numpy.ndarray.setflags: Modify array flags. ShortTimeFFT: Class this property belongs to. """ if self._dual_win is None: self._dual_win = _calc_dual_canonical_window(self.win, self.hop) self.dual_win.setflags(write=False) return self._dual_win @property def invertible(self) -> bool: """Check if STFT is invertible. This is achieved by trying to calculate the canonical dual window. See Also -------- :meth:`~ShortTimeFFT.istft`: Inverse short-time Fourier transform. m_num: Number of samples in window `win` and `dual_win`. dual_win: Dual window. win: Window for STFT. 
ShortTimeFFT: Class this property belongs to. """ try: return len(self.dual_win) > 0 # call self.dual_win() except ValueError: return False def istft(self, S: np.ndarray, k0: int = 0, k1: int | None = None, *, f_axis: int = -2, t_axis: int = -1) \ -> np.ndarray: """Inverse short-time Fourier transform. It returns an array of dimension ``S.ndim - 1`` which is real if `onesided_fft` is set, else complex. If the STFT is not `invertible`, or the parameters are out of bounds a ``ValueError`` is raised. Parameters ---------- S A complex valued array where `f_axis` denotes the frequency values and the `t-axis` dimension the temporal values of the STFT values. k0, k1 The start and the end index of the reconstructed signal. The default (``k0 = 0``, ``k1 = None``) assumes that the maximum length signal should be reconstructed. f_axis, t_axis The axes in `S` denoting the frequency and the time dimension. Notes ----- It is required that `S` has `f_pts` entries along the `f_axis`. For the `t_axis` it is assumed that the first entry corresponds to `p_min` * `delta_t` (being <= 0). The length of `t_axis` needs to be compatible with `k1`. I.e., ``S.shape[t_axis] >= self.p_max(k1)`` must hold, if `k1` is not ``None``. Else `k1` is set to `k_max` with:: q_max = S.shape[t_range] + self.p_min k_max = (q_max - 1) * self.hop + self.m_num - self.m_num_mid The :ref:`tutorial_stft` section of the :ref:`user_guide` discussed the slicing behavior by means of an example. See Also -------- invertible: Check if STFT is invertible. :meth:`~ShortTimeFFT.stft`: Perform Short-time Fourier transform. :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. 
""" if f_axis == t_axis: raise ValueError(f"{f_axis=} may not be equal to {t_axis=}!") if S.shape[f_axis] != self.f_pts: raise ValueError(f"{S.shape[f_axis]=} must be equal to " + f"{self.f_pts=} ({S.shape=})!") n_min = self.m_num-self.m_num_mid # minimum signal length if not (S.shape[t_axis] >= (q_num := self.p_num(n_min))): raise ValueError(f"{S.shape[t_axis]=} needs to have at least " + f"{q_num} slices ({S.shape=})!") if t_axis != S.ndim - 1 or f_axis != S.ndim - 2: t_axis = S.ndim + t_axis if t_axis < 0 else t_axis f_axis = S.ndim + f_axis if f_axis < 0 else f_axis S = np.moveaxis(S, (f_axis, t_axis), (-2, -1)) q_max = S.shape[-1] + self.p_min k_max = (q_max - 1) * self.hop + self.m_num - self.m_num_mid k1 = k_max if k1 is None else k1 if not (self.k_min <= k0 < k1 <= k_max): raise ValueError(f"({self.k_min=}) <= ({k0=}) < ({k1=}) <= " + f"({k_max=}) is false!") if not (num_pts := k1 - k0) >= n_min: raise ValueError(f"({k1=}) - ({k0=}) = {num_pts} has to be at " + f"least the half the window length {n_min}!") q0 = (k0 // self.hop + self.p_min if k0 >= 0 else # p_min always <= 0 k0 // self.hop) q1 = min(self.p_max(k1), q_max) k_q0, k_q1 = self.nearest_k_p(k0), self.nearest_k_p(k1, left=False) n_pts = k_q1 - k_q0 + self.m_num - self.m_num_mid x = np.zeros(S.shape[:-2] + (n_pts,), dtype=float if self.onesided_fft else complex) for q_ in range(q0, q1): xs = self._ifft_func(S[..., :, q_ - self.p_min]) * self.dual_win i0 = q_ * self.hop - self.m_num_mid i1 = min(i0 + self.m_num, n_pts+k0) j0, j1 = 0, i1 - i0 if i0 < k0: # xs sticks out to the left on x: j0 += k0 - i0 i0 = k0 x[..., i0-k0:i1-k0] += xs[..., j0:j1] x = x[..., :k1-k0] if x.ndim > 1: x = np.moveaxis(x, -1, f_axis if f_axis < x.ndim else t_axis) return x @property def fac_magnitude(self) -> float: """Factor to multiply the STFT values by to scale each frequency slice to a magnitude spectrum. It is 1 if attribute ``scaling == 'magnitude'``. 
The window can be scaled to a magnitude spectrum by using the method `scale_to`. See Also -------- fac_psd: Scaling factor for to a power spectral density spectrum. scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. scaling: Normalization applied to the window function. ShortTimeFFT: Class this property belongs to. """ if self.scaling == 'magnitude': return 1 if self._fac_mag is None: self._fac_mag = 1 / abs(sum(self.win)) return self._fac_mag @property def fac_psd(self) -> float: """Factor to multiply the STFT values by to scale each frequency slice to a power spectral density (PSD). It is 1 if attribute ``scaling == 'psd'``. The window can be scaled to a psd spectrum by using the method `scale_to`. See Also -------- fac_magnitude: Scaling factor for to a magnitude spectrum. scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. scaling: Normalization applied to the window function. ShortTimeFFT: Class this property belongs to. """ if self.scaling == 'psd': return 1 if self._fac_psd is None: self._fac_psd = 1 / np.sqrt( sum(self.win.real**2+self.win.imag**2) / self.T) return self._fac_psd @property def m_num(self) -> int: """Number of samples in window `win`. Note that the FFT can be oversampled by zero-padding. This is achieved by setting the `mfft` property. See Also -------- m_num_mid: Center index of window `win`. mfft: Length of input for the FFT used - may be larger than `m_num`. hop: Time increment in signal samples for sliding window. win: Window function as real- or complex-valued 1d array. ShortTimeFFT: Class this property belongs to. """ return len(self.win) @property def m_num_mid(self) -> int: """Center index of window `win`. For odd `m_num`, ``(m_num - 1) / 2`` is returned and for even `m_num` (per definition) ``m_num / 2`` is returned. See Also -------- m_num: Number of samples in window `win`. mfft: Length of input for the FFT used - may be larger than `m_num`. hop: ime increment in signal samples for sliding window. 
win: Window function as real- or complex-valued 1d array. ShortTimeFFT: Class this property belongs to. """ return self.m_num // 2 @cached_property def _pre_padding(self) -> tuple[int, int]: """Smallest signal index and slice index due to padding. Since, per convention, for time t=0, n,q is zero, the returned values are negative or zero. """ w2 = self.win.real**2 + self.win.imag**2 # move window to the left until the overlap with t >= 0 vanishes: n0 = -self.m_num_mid for p_, n_ in enumerate(range(n0, n0-self.m_num-1, -self.hop)): n_next = n_ - self.hop if n_next + self.m_num <= 0 or all(w2[n_next:] == 0): return n_, -p_ # Make the linter happy: raise RuntimeError("This code line should never run! Please file a bug.") @property def k_min(self) -> int: """The smallest possible signal index of the STFT. `k_min` is the index of the left-most non-zero value of the lowest slice `p_min`. Since the zeroth slice is centered over the zeroth sample of the input signal, `k_min` is never positive. A detailed example is provided in the :ref:`tutorial_stft_sliding_win` section of the :ref:`user_guide`. See Also -------- k_max: First sample index after signal end not touched by a time slice. lower_border_end: Where pre-padding effects end. p_min: The smallest possible slice index. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., `p_max` - `p_min`. p_range: Determine and validate slice index range. upper_border_begin: Where post-padding effects start. ShortTimeFFT: Class this property belongs to. """ return self._pre_padding[0] @property def p_min(self) -> int: """The smallest possible slice index. `p_min` is the index of the left-most slice, where the window still sticks into the signal, i.e., has non-zero part for t >= 0. `k_min` is the smallest index where the window function of the slice `p_min` is non-zero. Since, per convention the zeroth slice is centered at t=0, `p_min` <= 0 always holds. 
A detailed example is provided in the :ref:`tutorial_stft_sliding_win` section of the :ref:`user_guide`. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., `p_max` - `p_min`. p_range: Determine and validate slice index range. ShortTimeFFT: Class this property belongs to. """ return self._pre_padding[1] def _post_padding(self, n: int) -> tuple[int, int]: """Largest signal index and slice index due to padding. Parameters ---------- n : int Number of samples of input signal (must be ≥ half of the window length). Notes ----- Note that the return values are cached together with the parameter `n` to avoid unnecessary recalculations. """ if not (n >= (m2p := self.m_num - self.m_num_mid)): raise ValueError(f"Parameter n must be >= ceil(m_num/2) = {m2p}!") last_arg, last_return_value = self._cache_post_padding if n == last_arg: # use cached value: return last_return_value w2 = self.win.real**2 + self.win.imag**2 # move window to the right until the overlap for t < t[n] vanishes: q1 = n // self.hop # last slice index with t[p1] <= t[n] k1 = q1 * self.hop - self.m_num_mid for q_, k_ in enumerate(range(k1, n+self.m_num, self.hop), start=q1): n_next = k_ + self.hop if n_next >= n or all(w2[:n-n_next] == 0): return_value = k_ + self.m_num, q_ + 1 self._cache_post_padding = n, return_value return return_value raise RuntimeError("This code line should never run! Please file a bug.") # If this case is reached, it probably means the last slice should be # returned, i.e.: return k1 + self.m_num - self.m_num_mid, q1 + 1 def k_max(self, n: int) -> int: """First sample index after signal end not touched by a time slice. `k_max` - 1 is the largest sample index of the slice `p_max` - 1 for a given input signal of `n` samples. 
A detailed example is provided in the :ref:`tutorial_stft_sliding_win` section of the :ref:`user_guide`. Parameters ---------- n : int Number of samples of input signal (must be ≥ half of the window length). See Also -------- k_min: The smallest possible signal index. p_min: The smallest possible slice index. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., `p_max` - `p_min`. p_range: Determine and validate slice index range. ShortTimeFFT: Class this method belongs to. """ return self._post_padding(n)[0] def p_max(self, n: int) -> int: """Index of first non-overlapping upper time slice for `n` sample input. Note that center point t[p_max] = (p_max(n)-1) * `delta_t` is typically larger than last time index t[n-1] == (`n`-1) * `T`. The upper border of samples indexes covered by the window slices is given by `k_max`. Furthermore, `p_max` does not denote the number of slices `p_num` since `p_min` is typically less than zero. A detailed example is provided in the :ref:`tutorial_stft_sliding_win` section of the :ref:`user_guide`. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. p_min: The smallest possible slice index. p_num: Number of time slices, i.e., `p_max` - `p_min`. p_range: Determine and validate slice index range. ShortTimeFFT: Class this method belongs to. """ return self._post_padding(n)[1] def p_num(self, n: int) -> int: """Number of time slices for an input signal with `n` samples. It is given by `p_num` = `p_max` - `p_min` with `p_min` typically being negative. A detailed example is provided in the :ref:`tutorial_stft_sliding_win` section of the :ref:`user_guide`. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. lower_border_end: Where pre-padding effects end. p_min: The smallest possible slice index. 
p_max: Index of first non-overlapping upper time slice. p_range: Determine and validate slice index range. upper_border_begin: Where post-padding effects start. ShortTimeFFT: Class this method belongs to. """ return self.p_max(n) - self.p_min @property def lower_border_end(self) -> tuple[int, int]: """First signal index and first slice index unaffected by pre-padding. Describes the point where the window does not stick out to the left of the signal domain. A detailed example is provided in the :ref:`tutorial_stft_sliding_win` section of the :ref:`user_guide`. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. lower_border_end: Where pre-padding effects end. p_min: The smallest possible slice index. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., `p_max` - `p_min`. p_range: Determine and validate slice index range. upper_border_begin: Where post-padding effects start. ShortTimeFFT: Class this property belongs to. """ if self._lower_border_end is not None: return self._lower_border_end # first non-zero element in self.win: m0 = np.flatnonzero(self.win.real**2 + self.win.imag**2)[0] # move window to the right until does not stick out to the left: k0 = -self.m_num_mid + m0 for q_, k_ in enumerate(range(k0, self.hop + 1, self.hop)): if k_ + self.hop >= 0: # next entry does not stick out anymore self._lower_border_end = (k_ + self.m_num, q_ + 1) return self._lower_border_end self._lower_border_end = (0, max(self.p_min, 0)) # ends at first slice return self._lower_border_end def upper_border_begin(self, n: int) -> tuple[int, int]: """First signal index and first slice index affected by post-padding. Describes the point where the window does begin stick out to the right of the signal domain. A detailed example is given :ref:`tutorial_stft_sliding_win` section of the :ref:`user_guide`. 
Parameters ---------- n : int Number of samples of input signal (must be ≥ half of the window length). Returns ------- k_ub : int Lowest signal index, where a touching time slice sticks out past the signal end. p_ub : int Lowest index of time slice of which the end sticks out past the signal end. Notes ----- Note that the return values are cached together with the parameter `n` to avoid unnecessary recalculations. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. lower_border_end: Where pre-padding effects end. p_min: The smallest possible slice index. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., `p_max` - `p_min`. p_range: Determine and validate slice index range. ShortTimeFFT: Class this method belongs to. """ if not (n >= (m2p := self.m_num - self.m_num_mid)): raise ValueError(f"Parameter n must be >= ceil(m_num/2) = {m2p}!") last_arg, last_return_value = self._cache_upper_border_begin if n == last_arg: # use cached value: return last_return_value w2 = self.win.real**2 + self.win.imag**2 q2 = n // self.hop + 1 # first t[q] >= t[n] q1 = max((n-self.m_num) // self.hop - 1, -1) # move window left until does not stick out to the right: for q_ in range(q2, q1, -1): k_ = q_ * self.hop + (self.m_num - self.m_num_mid) if k_ <= n or all(w2[n-k_:] == 0): return_value = (q_ + 1) * self.hop - self.m_num_mid, q_ + 1 self. _cache_upper_border_begin = n, return_value return return_value # make linter happy: raise RuntimeError("This code line should never run! Please file a bug.") @property def delta_t(self) -> float: """Time increment of STFT. The time increment `delta_t` = `T` * `hop` represents the sample increment `hop` converted to time based on the sampling interval `T`. See Also -------- delta_f: Width of the frequency bins of the STFT. hop: Hop size in signal samples for sliding window. t: Times of STFT for an input signal with `n` samples. 
T: Sampling interval of input signal and window `win`. ShortTimeFFT: Class this property belongs to """ return self.T * self.hop def p_range(self, n: int, p0: int | None = None, p1: int | None = None) -> tuple[int, int]: """Determine and validate slice index range. Parameters ---------- n : int Number of samples of input signal, assuming t[0] = 0. p0 : int | None First slice index. If 0 then the first slice is centered at t = 0. If ``None`` then `p_min` is used. Note that p0 may be < 0 if slices are left of t = 0. p1 : int | None End of interval (last value is p1-1). If ``None`` then `p_max(n)` is used. Returns ------- p0_ : int The fist slice index p1_ : int End of interval (last value is p1-1). Notes ----- A ``ValueError`` is raised if ``p_min <= p0 < p1 <= p_max(n)`` does not hold. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. lower_border_end: Where pre-padding effects end. p_min: The smallest possible slice index. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., `p_max` - `p_min`. upper_border_begin: Where post-padding effects start. ShortTimeFFT: Class this property belongs to. """ p_max = self.p_max(n) # shorthand p0_ = self.p_min if p0 is None else p0 p1_ = p_max if p1 is None else p1 if not (self.p_min <= p0_ < p1_ <= p_max): raise ValueError(f"Invalid Parameter {p0=}, {p1=}, i.e., " + f"{self.p_min=} <= p0 < p1 <= {p_max=} " + f"does not hold for signal length {n=}!") return p0_, p1_ def t(self, n: int, p0: int | None = None, p1: int | None = None, k_offset: int = 0) -> np.ndarray: """Times of STFT for an input signal with `n` samples. Returns a 1d array with times of the `~ShortTimeFFT.stft` values with the same parametrization. Note that the slices are ``delta_t = hop * T`` time units apart. Parameters ---------- n Number of sample of the input signal. p0 The first element of the range of slices to calculate. 
If ``None`` then it is set to :attr:`p_min`, which is the smallest possible slice. p1 The end of the array. If ``None`` then `p_max(n)` is used. k_offset Index of first sample (t = 0) in `x`. Notes ----- Note that the returned array is cached together with the method's call parameters to avoid unnecessary recalculations. See Also -------- delta_t: Time increment of STFT (``hop*T``) hop: Time increment in signal samples for sliding window. nearest_k_p: Nearest sample index k_p for which t[k_p] == t[p] holds. T: Sampling interval of input signal and of the window (``1/fs``). fs: Sampling frequency (being ``1/T``) ShortTimeFFT: Class this method belongs to. """ if not (n > 0 and isinstance(n, int | np.integer)): raise ValueError(f"Parameter {n=} is not a positive integer!") args = n, p0, p1, k_offset, self.T # since `self.T` is mutable, it's needed too last_args, last_return_value = self._cache_t if args == last_args: # use cached value: return last_return_value p0, p1 = self.p_range(n, p0, p1) return_value = np.arange(p0, p1) * self.delta_t + k_offset * self.T self._cache_t = args, return_value return return_value def nearest_k_p(self, k: int, left: bool = True) -> int: """Return nearest sample index k_p for which t[k_p] == t[p] holds. The nearest next smaller time sample p (where t[p] is the center position of the window of the p-th slice) is p_k = k // `hop`. If `hop` is a divisor of `k` then `k` is returned. If `left` is set then p_k * `hop` is returned else (p_k+1) * `hop`. This method can be used to slice an input signal into chunks for calculating the STFT and iSTFT incrementally. See Also -------- delta_t: Time increment of STFT (``hop*T``) hop: Time increment in signal samples for sliding window. T: Sampling interval of input signal and of the window (``1/fs``). fs: Sampling frequency (being ``1/T``) t: Times of STFT for an input signal with `n` samples. ShortTimeFFT: Class this method belongs to. 
""" p_q, remainder = divmod(k, self.hop) if remainder == 0: return k return p_q * self.hop if left else (p_q + 1) * self.hop @property def delta_f(self) -> float: """Width of the frequency bins of the STFT. Return the frequency interval `delta_f` = 1 / (`mfft` * `T`). See Also -------- delta_t: Time increment of STFT. f_pts: Number of points along the frequency axis. f: Frequencies values of the STFT. mfft: Length of the input for FFT used. T: Sampling interval. t: Times of STFT for an input signal with `n` samples. ShortTimeFFT: Class this property belongs to. """ return 1 / (self.mfft * self.T) @property def f_pts(self) -> int: """Number of points along the frequency axis. See Also -------- delta_f: Width of the frequency bins of the STFT. f: Frequencies values of the STFT. mfft: Length of the input for FFT used. ShortTimeFFT: Class this property belongs to. """ return self.mfft // 2 + 1 if self.onesided_fft else self.mfft @property def onesided_fft(self) -> bool: """Return True if a one-sided FFT is used. Returns ``True`` if `fft_mode` is either 'onesided' or 'onesided2X'. See Also -------- fft_mode: Utilized FFT ('twosided', 'centered', 'onesided' or 'onesided2X') ShortTimeFFT: Class this property belongs to. """ return self.fft_mode in {'onesided', 'onesided2X'} @property def f(self) -> np.ndarray: """Frequencies values of the STFT. A 1d array of length `f_pts` with `delta_f` spaced entries is returned. This array is calculated lazily. See Also -------- delta_f: Width of the frequency bins of the STFT. f_pts: Number of points along the frequency axis. mfft: Length of the input for FFT used. ShortTimeFFT: Class this property belongs to. 
""" last_state, last_return_value = self._cache_f current_state = self.fft_mode, self.mfft, self.T if current_state == last_state: # use cached value: return last_return_value if self.fft_mode in {'onesided', 'onesided2X'}: return_value = fft_lib.rfftfreq(self.mfft, self.T) elif self.fft_mode == 'twosided': return_value = fft_lib.fftfreq(self.mfft, self.T) elif self.fft_mode == 'centered': return_value = fft_lib.fftshift(fft_lib.fftfreq(self.mfft, self.T)) else: # This should never happen but makes the Linters happy: fft_modes = get_args(FFT_MODE_TYPE) raise RuntimeError(f"{self.fft_mode=} not in {fft_modes}!") self._cache_f = current_state, return_value return return_value def _fft_func(self, x: np.ndarray) -> np.ndarray: """FFT based on the `fft_mode`, `mfft`, `scaling` and `phase_shift` attributes. For multidimensional arrays the transformation is carried out on the last axis. """ if self.phase_shift is not None: if x.shape[-1] < self.mfft: # zero pad if needed z_shape = list(x.shape) z_shape[-1] = self.mfft - x.shape[-1] x = np.hstack((x, np.zeros(z_shape, dtype=x.dtype))) p_s = (self.phase_shift + self.m_num_mid) % self.m_num x = np.roll(x, -p_s, axis=-1) if self.fft_mode == 'twosided': return fft_lib.fft(x, n=self.mfft, axis=-1) if self.fft_mode == 'centered': return fft_lib.fftshift(fft_lib.fft(x, self.mfft, axis=-1), axes=-1) if self.fft_mode == 'onesided': return fft_lib.rfft(x, n=self.mfft, axis=-1) if self.fft_mode == 'onesided2X': X = fft_lib.rfft(x, n=self.mfft, axis=-1) # Either squared magnitude (psd) or magnitude is doubled: fac = np.sqrt(2) if self.scaling == 'psd' else 2 # For even input length, the last entry is unpaired: X[..., 1: -1 if self.mfft % 2 == 0 else None] *= fac return X # This should never happen but makes the Linter happy: fft_modes = get_args(FFT_MODE_TYPE) raise RuntimeError(f"{self.fft_mode=} not in {fft_modes}!") def _ifft_func(self, X: np.ndarray) -> np.ndarray: """Inverse to `_fft_func`. Returned is an array of length `m_num`. 
If the FFT is `onesided` then a float array is returned else a complex array is returned. For multidimensional arrays the transformation is carried out on the last axis. """ if self.fft_mode == 'twosided': x = fft_lib.ifft(X, n=self.mfft, axis=-1) elif self.fft_mode == 'centered': x = fft_lib.ifft(fft_lib.ifftshift(X, axes=-1), n=self.mfft, axis=-1) elif self.fft_mode == 'onesided': x = fft_lib.irfft(X, n=self.mfft, axis=-1) elif self.fft_mode == 'onesided2X': Xc = X.copy() # we do not want to modify function parameters fac = np.sqrt(2) if self.scaling == 'psd' else 2 # For even length X the last value is not paired with a negative # value on the two-sided FFT: q1 = -1 if self.mfft % 2 == 0 else None Xc[..., 1:q1] /= fac x = fft_lib.irfft(Xc, n=self.mfft, axis=-1) else: # This should never happen but makes the Linter happy: raise RuntimeError(f"{self.fft_mode=} not in {get_args(FFT_MODE_TYPE)}!") if self.phase_shift is None: return x[..., :self.m_num] p_s = (self.phase_shift + self.m_num_mid) % self.m_num return np.roll(x, p_s, axis=-1)[..., :self.m_num] def extent(self, n: int, axes_seq: Literal['tf', 'ft'] = 'tf', center_bins: bool = False) -> tuple[float, float, float, float]: """Return minimum and maximum values time-frequency values. A tuple with four floats ``(t0, t1, f0, f1)`` for 'tf' and ``(f0, f1, t0, t1)`` for 'ft' is returned describing the corners of the time-frequency domain of the `~ShortTimeFFT.stft`. That tuple can be passed to `matplotlib.pyplot.imshow` as a parameter with the same name. Parameters ---------- n : int Number of samples in input signal. axes_seq : {'tf', 'ft'} Return time extent first and then frequency extent or vice versa. center_bins: bool If set (default ``False``), the values of the time slots and frequency bins are moved from the side the middle. This is useful, when plotting the `~ShortTimeFFT.stft` values as step functions, i.e., with no interpolation. 
See Also -------- :func:`matplotlib.pyplot.imshow`: Display data as an image. :class:`scipy.signal.ShortTimeFFT`: Class this method belongs to. Examples -------- The following two plots illustrate the effect of the parameter `center_bins`: The grid lines represent the three time and the four frequency values of the STFT. The left plot, where ``(t0, t1, f0, f1) = (0, 3, 0, 4)`` is passed as parameter ``extent`` to `~matplotlib.pyplot.imshow`, shows the standard behavior of the time and frequency values being at the lower edge of the corrsponding bin. The right plot, with ``(t0, t1, f0, f1) = (-0.5, 2.5, -0.5, 3.5)``, shows that the bins are centered over the respective values when passing ``center_bins=True``. >>> import matplotlib.pyplot as plt >>> import numpy as np >>> from scipy.signal import ShortTimeFFT ... >>> n, m = 12, 6 >>> SFT = ShortTimeFFT.from_window('hann', fs=m, nperseg=m, noverlap=0) >>> Sxx = SFT.stft(np.cos(np.arange(n))) # produces a colorful plot ... >>> fig, axx = plt.subplots(1, 2, tight_layout=True, figsize=(6., 4.)) >>> for ax_, center_bins in zip(axx, (False, True)): ... ax_.imshow(abs(Sxx), origin='lower', interpolation=None, aspect='equal', ... cmap='viridis', extent=SFT.extent(n, 'tf', center_bins)) ... ax_.set_title(f"{center_bins=}") ... ax_.set_xlabel(f"Time ({SFT.p_num(n)} points, Δt={SFT.delta_t})") ... ax_.set_ylabel(f"Frequency ({SFT.f_pts} points, Δf={SFT.delta_f})") ... ax_.set_xticks(SFT.t(n)) # vertical grid line are timestamps ... ax_.set_yticks(SFT.f) # horizontal grid line are frequency values ... ax_.grid(True) >>> plt.show() Note that the step-like behavior with the constant colors is caused by passing ``interpolation=None`` to `~matplotlib.pyplot.imshow`. 
""" if axes_seq not in ('tf', 'ft'): raise ValueError(f"Parameter {axes_seq=} not in ['tf', 'ft']!") if self.onesided_fft: q0, q1 = 0, self.f_pts elif self.fft_mode == 'centered': q0 = -(self.mfft // 2) q1 = self.mfft // 2 if self.mfft % 2 == 0 else self.mfft // 2 + 1 else: raise ValueError(f"Attribute fft_mode={self.fft_mode} must be " + "in ['centered', 'onesided', 'onesided2X']") p0, p1 = self.p_min, self.p_max(n) # shorthand if center_bins: t0, t1 = self.delta_t * (p0 - 0.5), self.delta_t * (p1 - 0.5) f0, f1 = self.delta_f * (q0 - 0.5), self.delta_f * (q1 - 0.5) else: t0, t1 = self.delta_t * p0, self.delta_t * p1 f0, f1 = self.delta_f * q0, self.delta_f * q1 return (t0, t1, f0, f1) if axes_seq == 'tf' else (f0, f1, t0, t1)
ShortTimeFFT
python
weaviate__weaviate-python-client
weaviate/collections/queries/near_image/query/sync.py
{ "start": 308, "end": 449 }
class ____( Generic[Properties, References], _NearImageQueryExecutor[ConnectionSync, Properties, References], ): pass
_NearImageQuery
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/math_ops/cumulative_logsumexp_test.py
{ "start": 1081, "end": 4402 }
class ____(test.TestCase): valid_dtypes = [ dtypes.float32, dtypes.float64, dtypes.float16, dtypes.bfloat16, ] def _computeLogSumExp(self, x, **kwargs): result_naive = math_ops.cumsum(math_ops.exp(x), **kwargs) result_fused = math_ops.exp(math_ops.cumulative_logsumexp(x, **kwargs)) return result_naive, result_fused def _testLogSumExp(self, x, dtype=dtypes.float32, use_gpu=False, **kwargs): with self.cached_session(use_gpu=use_gpu): x = ops.convert_to_tensor(x, dtype=dtype) result_naive, result_fused = self.evaluate( self._computeLogSumExp(x, **kwargs)) tol = 2e-2 if dtype in [dtypes.float16, dtypes.bfloat16] else 1e-6 self.assertAllClose(result_naive, result_fused, rtol=tol, atol=tol) def _testLogSumExpAllArgs(self, x, axis=0, use_gpu=False): for dtype in self.valid_dtypes: for reverse in (True, False): for exclusive in (True, False): self._testLogSumExp( x, dtype=dtype, use_gpu=use_gpu, reverse=reverse, exclusive=exclusive, axis=axis) def testMinusInfinity(self): x = np.log([0., 0., 1., 1., 1., 1., 0., 0.]) self._testLogSumExpAllArgs(x, use_gpu=False) self._testLogSumExpAllArgs(x, use_gpu=True) def test1D(self): x = np.arange(10) / 10.0 - 0.5 self._testLogSumExpAllArgs(x, use_gpu=False) self._testLogSumExpAllArgs(x, use_gpu=True) def test2D(self): x = np.reshape(np.arange(20) / 20.0 - 0.5, (2, 10)) for axis in (-2, -1, 0, 1): self._testLogSumExpAllArgs(x, axis=axis, use_gpu=False) self._testLogSumExpAllArgs(x, axis=axis, use_gpu=True) def _testGradient(self, x, use_gpu=False, **kwargs): with self.cached_session(use_gpu=use_gpu): x = ops.convert_to_tensor(x, dtype=dtypes.float64) grad_naive_theoretical, _ = gradient_checker_v2.compute_gradient( lambda y: math_ops.cumsum(math_ops.exp(y), **kwargs), [x]) grad_fused_theoretical, _ = gradient_checker_v2.compute_gradient( lambda y: math_ops.exp(math_ops.cumulative_logsumexp(y, **kwargs)), [x]) self.assertAllClose(grad_fused_theoretical, grad_naive_theoretical) def testGradient(self): for reverse in (True, False): for 
exclusive in (True, False): x = np.arange(10) / 10.0 - 0.5 self._testGradient(x, use_gpu=False, reverse=reverse, exclusive=exclusive) self._testGradient(x, use_gpu=True, reverse=reverse, exclusive=exclusive) def _logSumExpMap(self, x): return map_fn.map_fn( lambda i: math_ops.reduce_logsumexp(x[:i + 1]), math_ops.range(array_ops.shape(x)[0]), dtype=x.dtype) def test1DLarge(self): # This test ensures that the operation is correct even when the naive # implementation would overflow. x_np = np.arange(20) * 20.0 for use_gpu in (True, False): with self.cached_session(use_gpu=use_gpu): x_tf = ops.convert_to_tensor(x_np, dtype=dtypes.float32) result_fused = self.evaluate(math_ops.cumulative_logsumexp(x_tf)) result_map = self.evaluate(self._logSumExpMap(x_tf)) self.assertAllClose(result_fused, result_map) if __name__ == '__main__': test.main()
CumulativeLogsumexpTest
python
cython__cython
Cython/Debugger/libcython.py
{ "start": 33632, "end": 34245 }
class ____(CythonExecutionControlCommand, libpython.PythonStepperMixin): "Step through Cython, Python or C code." name = 'cy -step' stepinto = True @libpython.dont_suppress_errors def invoke(self, args, from_tty): if self.is_python_function(): self.python_step(self.stepinto) elif not self.is_cython_function(): if self.stepinto: command = 'step' else: command = 'next' self.finish_executing(gdb.execute(command, to_string=True)) else: self.step(stepinto=self.stepinto)
CyStep
python
apache__airflow
providers/standard/tests/unit/standard/sensors/test_filesystem.py
{ "start": 1404, "end": 7527 }
class ____: def setup_method(self): from airflow.providers.standard.hooks.filesystem import FSHook hook = FSHook() args = {"owner": "airflow", "start_date": DEFAULT_DATE} dag = DAG(TEST_DAG_ID + "test_schedule_dag_once", schedule=timedelta(days=1), default_args=args) self.hook = hook self.dag = dag def test_simple(self): with tempfile.NamedTemporaryFile() as tmp: task = FileSensor( task_id="test", filepath=tmp.name[1:], fs_conn_id="fs_default", dag=self.dag, timeout=0, ) task._hook = self.hook task.execute({}) def test_file_in_nonexistent_dir(self): temp_dir = tempfile.mkdtemp() task = FileSensor( task_id="test", filepath=temp_dir[1:] + "/file", fs_conn_id="fs_default", dag=self.dag, timeout=0, poke_interval=1, ) task._hook = self.hook try: with pytest.raises(AirflowSensorTimeout): task.execute({}) finally: shutil.rmtree(temp_dir) def test_empty_dir(self): temp_dir = tempfile.mkdtemp() task = FileSensor( task_id="test", filepath=temp_dir[1:], fs_conn_id="fs_default", dag=self.dag, timeout=0, poke_interval=1, ) task._hook = self.hook try: with pytest.raises(AirflowSensorTimeout): task.execute({}) finally: shutil.rmtree(temp_dir) def test_file_in_dir(self): temp_dir = tempfile.mkdtemp() task = FileSensor( task_id="test", filepath=temp_dir[1:], fs_conn_id="fs_default", dag=self.dag, timeout=0, ) task._hook = self.hook try: # `touch` the dir open(temp_dir + "/file", "a").close() task.execute({}) finally: shutil.rmtree(temp_dir) def test_default_fs_conn_id(self): with tempfile.NamedTemporaryFile() as tmp: task = FileSensor( task_id="test", filepath=tmp.name[1:], dag=self.dag, timeout=0, ) task._hook = self.hook task.execute({}) def test_wildcard_file(self): suffix = ".txt" with tempfile.NamedTemporaryFile(suffix=suffix) as tmp: fileglob = os.path.join(os.path.dirname(tmp.name), "*" + suffix) task = FileSensor( task_id="test", filepath=fileglob, fs_conn_id="fs_default", dag=self.dag, timeout=0, ) task._hook = self.hook task.execute({}) def 
test_wildcard_empty_directory(self): with tempfile.TemporaryDirectory() as temp_dir: with tempfile.TemporaryDirectory(suffix="subdir", dir=temp_dir): task = FileSensor( task_id="test", filepath=os.path.join(temp_dir, "*dir"), fs_conn_id="fs_default", dag=self.dag, timeout=0, ) task._hook = self.hook # No files in dir with pytest.raises(AirflowSensorTimeout): task.execute({}) def test_wildcard_directory_with_files(self): with tempfile.TemporaryDirectory() as temp_dir: with tempfile.TemporaryDirectory(suffix="subdir", dir=temp_dir) as subdir: task = FileSensor( task_id="test", filepath=os.path.join(temp_dir, "*dir"), fs_conn_id="fs_default", dag=self.dag, timeout=0, ) task._hook = self.hook # `touch` the file in subdir open(os.path.join(subdir, "file"), "a").close() task.execute({}) def test_wildcared_directory(self): temp_dir = tempfile.mkdtemp() subdir = tempfile.mkdtemp(dir=temp_dir) task = FileSensor( task_id="test", filepath=temp_dir + "/**", fs_conn_id="fs_default", dag=self.dag, timeout=0, poke_interval=1, recursive=True, ) task._hook = self.hook try: # `touch` the file in subdir open(subdir + "/file", "a").close() task.execute({}) finally: shutil.rmtree(temp_dir) def test_subdirectory_not_empty(self): suffix = ".txt" temp_dir = tempfile.mkdtemp() subdir = tempfile.mkdtemp(dir=temp_dir) with tempfile.NamedTemporaryFile(suffix=suffix, dir=subdir): task = FileSensor( task_id="test", filepath=temp_dir, fs_conn_id="fs_default", dag=self.dag, timeout=0, ) task._hook = self.hook task.execute({}) shutil.rmtree(temp_dir) def test_subdirectory_empty(self, tmp_path): (tmp_path / "subdir").mkdir() task = FileSensor( task_id="test", filepath=tmp_path.as_posix(), fs_conn_id="fs_default", dag=self.dag, timeout=0, poke_interval=1, ) task._hook = self.hook with pytest.raises(AirflowSensorTimeout): task.execute({}) def test_task_defer(self): task = FileSensor( task_id="test", filepath="temp_dir", fs_conn_id="fs_default", deferrable=True, dag=self.dag, ) with 
pytest.raises(TaskDeferred) as exc: task.execute({}) assert isinstance(exc.value.trigger, FileTrigger), "Trigger is not a FileTrigger"
TestFileSensor
python
getsentry__sentry
src/sentry/integrations/source_code_management/search.py
{ "start": 1224, "end": 5373 }
class ____(IntegrationEndpoint, Generic[T], ABC): owner = ApiOwner.ECOSYSTEM publish_status = { "GET": ApiPublishStatus.PRIVATE, } @property def issue_field(self) -> str: return "externalIssue" # not used in VSTS @property def repository_field(self) -> str | None: return None @property @abstractmethod def integration_provider(self) -> str | None: raise NotImplementedError @property @abstractmethod def installation_class( self, ) -> type[T]: raise NotImplementedError @abstractmethod def handle_search_issues(self, installation: T, query: str, repo: str | None) -> Response: raise NotImplementedError def record_event( self, event: SCMIntegrationInteractionType, organization_id: int, integration_id: int, ) -> SCMIntegrationInteractionEvent: # XXX (mifu67): self.integration_provider is None for the GithubSharedSearchEndpoint, # which is used by both GitHub and GitHub Enterprise. provider_name = ( IntegrationProviderSlug.GITHUB.value if self.integration_provider is None else self.integration_provider ) return SCMIntegrationInteractionEvent( interaction_type=event, provider_key=provider_name, organization_id=organization_id, integration_id=integration_id, ) # not used in VSTS def handle_search_repositories( self, integration: Integration, installation: T, query: str ) -> Response: raise NotImplementedError def get( self, request: Request, organization: RpcOrganization, integration_id: int, **kwds: Any ) -> Response: with self.record_event( SCMIntegrationInteractionType.GET, organization_id=organization.id, integration_id=integration_id, ).capture() as lifecycle: integration_query = Q( organizationintegration__organization_id=organization.id, id=integration_id ) if self.integration_provider: integration_query &= Q(provider=self.integration_provider) try: integration: Integration = Integration.objects.get(integration_query) except Integration.DoesNotExist: lifecycle.record_halt(str(SourceCodeSearchEndpointHaltReason.MISSING_INTEGRATION)) return Response(status=404) 
serializer = SourceCodeSearchSerializer(data=request.query_params) if not serializer.is_valid(): lifecycle.record_halt(str(SourceCodeSearchEndpointHaltReason.SERIALIZER_ERRORS)) return self.respond(serializer.errors, status=400) field = serializer.validated_data["field"] query = serializer.validated_data["query"] installation = integration.get_installation(organization.id) if not isinstance(installation, self.installation_class): raise NotFound( f"Integration by that id is not of type {self.integration_provider}." ) if field == self.issue_field: repo = None if self.repository_field: # only fetch repository repo = request.GET.get(self.repository_field) if repo is None: lifecycle.record_halt( str(SourceCodeSearchEndpointHaltReason.MISSING_REPOSITORY_FIELD) ) return Response( {"detail": f"{self.repository_field} is a required parameter"}, status=400, ) return self.handle_search_issues(installation, query, repo) if self.repository_field and field == self.repository_field: return self.handle_search_repositories(integration, installation, query) return Response({"detail": "Invalid field"}, status=400)
SourceCodeSearchEndpoint
python
kamyu104__LeetCode-Solutions
Python/kth-smallest-element-in-a-bst.py
{ "start": 37, "end": 586 }
class ____(object): # @param {TreeNode} root # @param {integer} k # @return {integer} def kthSmallest(self, root, k): s, cur, rank = [], root, 0 while s or cur: if cur: s.append(cur) cur = cur.left else: cur = s.pop() rank += 1 if rank == k: return cur.val cur = cur.right return float("-inf") # time: O(max(h, k)) # space: O(h) from itertools import islice
Solution
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1588339, "end": 1589306 }
class ____(sgqlc.types.Union): """An item in an issue timeline""" __schema__ = github_schema __types__ = ( AddedToProjectEvent, AssignedEvent, ClosedEvent, CommentDeletedEvent, ConnectedEvent, ConvertedNoteToIssueEvent, ConvertedToDiscussionEvent, CrossReferencedEvent, DemilestonedEvent, DisconnectedEvent, IssueComment, LabeledEvent, LockedEvent, MarkedAsDuplicateEvent, MentionedEvent, MilestonedEvent, MovedColumnsInProjectEvent, PinnedEvent, ReferencedEvent, RemovedFromProjectEvent, RenamedTitleEvent, ReopenedEvent, SubscribedEvent, TransferredEvent, UnassignedEvent, UnlabeledEvent, UnlockedEvent, UnmarkedAsDuplicateEvent, UnpinnedEvent, UnsubscribedEvent, UserBlockedEvent, )
IssueTimelineItems
python
pydantic__pydantic
pydantic/v1/errors.py
{ "start": 6663, "end": 6811 }
class ____(_PathValueError): code = 'path.not_a_directory' msg_template = 'path "{path}" does not point to a directory'
PathNotADirectoryError
python
spack__spack
lib/spack/spack/new_installer.py
{ "start": 5923, "end": 8498 }
class ____: """Emulates ./build 2>&1 | tee build.log. The output is sent both to a log file and the parent process (if echoing is enabled). The control_fd is used to enable/disable echoing. The initial log file is /dev/null and can be changed later with set_output_file().""" def __init__(self, control: Connection, parent: Connection) -> None: self.control = control self.parent = parent dev_null_fd = os.open(os.devnull, os.O_WRONLY) #: The file descriptor of the log file (initially /dev/null) self.log_fd = os.dup(dev_null_fd) os.close(dev_null_fd) r, w = os.pipe() self.tee_thread = threading.Thread( target=tee, args=(self.control.fileno(), r, self.log_fd, self.parent.fileno()), daemon=True, ) self.tee_thread.start() os.dup2(w, sys.stdout.fileno()) os.dup2(w, sys.stderr.fileno()) os.close(w) def set_output_file(self, path: str) -> None: """Redirect output to the specified log file.""" log_fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o644) os.dup2(log_fd, self.log_fd) os.close(log_fd) def close(self) -> None: # Closing stdout and stderr should close the last reference to the write end of the pipe, # causing the tee thread to wake up, flush the last data, and exit. os.close(sys.stdout.fileno()) os.close(sys.stderr.fileno()) self.tee_thread.join() # Only then close the other fds. 
self.control.close() self.parent.close() os.close(self.log_fd) def install_from_buildcache( mirrors: List[spack.url_buildcache.MirrorURLAndVersion], spec: spack.spec.Spec, unsigned: Optional[bool], state_stream: io.TextIOWrapper, ) -> bool: send_state("fetching from build cache", state_stream) tarball_stage = spack.binary_distribution.download_tarball(spec.build_spec, unsigned, mirrors) if tarball_stage is None: return False send_state("relocating", state_stream) spack.binary_distribution.extract_tarball(spec, tarball_stage, force=False) if spec.spliced: # overwrite old metadata with new spack.store.STORE.layout.write_spec(spec, spack.store.STORE.layout.spec_file_path(spec)) # now a block of curious things follow that should be fixed. pkg = spec.package if hasattr(pkg, "_post_buildcache_install_hook"): pkg._post_buildcache_install_hook() pkg.installed_from_binary_cache = True return True
Tee
python
apache__airflow
providers/hashicorp/src/airflow/providers/hashicorp/secrets/vault.py
{ "start": 1128, "end": 11378 }
class ____(BaseSecretsBackend, LoggingMixin):
    """
    Retrieves Connections and Variables from Hashicorp Vault.

    Configurable via ``airflow.cfg`` as follows:

    .. code-block:: ini

        [secrets]
        backend = airflow.providers.hashicorp.secrets.vault.VaultBackend
        backend_kwargs = {
            "connections_path": "connections",
            "url": "http://127.0.0.1:8200",
            "mount_point": "airflow"
            }

    For example, if your keys are under ``connections`` path in ``airflow`` mount_point, this
    would be accessible if you provide ``{"connections_path": "connections"}`` and request
    conn_id ``smtp_default``.

    :param connections_path: Specifies the path of the secret to read to get Connections.
        (default: 'connections'). If set to None (null), requests for connections will not be sent to
        Vault.
    :param variables_path: Specifies the path of the secret to read to get Variable.
        (default: 'variables'). If set to None (null), requests for variables will not be sent to
        Vault.
    :param config_path: Specifies the path of the secret to read Airflow Configurations
        (default: 'config'). If set to None (null), requests for configurations will not be sent to
        Vault.
    :param url: Base URL for the Vault instance being addressed.
    :param auth_type: Authentication Type for Vault. Default is ``token``. Available values are:
        ('approle', 'aws_iam', 'azure', 'github', 'gcp', 'kubernetes', 'ldap', 'radius', 'token',
        'userpass')
    :param auth_mount_point: It can be used to define mount_point for authentication chosen
        Default depends on the authentication method used.
    :param mount_point: The "path" the secret engine was mounted on. Default is "secret". Note that
        this mount_point is not used for authentication if authentication is done via a
        different engine. If set to None, the mount secret should be provided as a prefix for each
        variable/connection_id. For authentication mount_points see, auth_mount_point.
    :param kv_engine_version: Select the version of the engine to run (``1`` or ``2``, default: ``2``).
    :param token: Authentication token to include in requests sent to Vault.
        (for ``token`` and ``github`` auth_type)
    :param token_path: path to file containing authentication token to include in requests sent to
        Vault (for ``token`` and ``github`` auth_type).
    :param username: Username for Authentication (for ``ldap`` and ``userpass`` auth_type).
    :param password: Password for Authentication (for ``ldap`` and ``userpass`` auth_type).
    :param key_id: Key ID for Authentication (for ``aws_iam`` and ''azure`` auth_type).
    :param secret_id: Secret ID for Authentication (for ``approle``, ``aws_iam`` and ``azure``
        auth_types).
    :param role_id: Role ID for Authentication (for ``approle``, ``aws_iam`` auth_types).
    :param assume_role_kwargs: AWS assume role param.
        See AWS STS Docs:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sts/client/assume_role.html
    :param region: AWS region for STS API calls (for ``aws_iam`` auth_type).
    :param kubernetes_role: Role for Authentication (for ``kubernetes`` auth_type).
    :param kubernetes_jwt_path: Path for kubernetes jwt token (for ``kubernetes`` auth_type, default:
        ``/var/run/secrets/kubernetes.io/serviceaccount/token``).
    :param gcp_key_path: Path to Google Cloud Service Account key file (JSON) (for ``gcp`` auth_type).
        Mutually exclusive with gcp_keyfile_dict.
    :param gcp_keyfile_dict: Dictionary of keyfile parameters. (for ``gcp`` auth_type).
        Mutually exclusive with gcp_key_path.
    :param gcp_scopes: Comma-separated string containing OAuth2 scopes (for ``gcp`` auth_type).
    :param azure_tenant_id: The tenant id for the Azure Active Directory (for ``azure`` auth_type).
    :param azure_resource: The configured URL for the application registered in Azure Active Directory
        (for ``azure`` auth_type).
    :param radius_host: Host for radius (for ``radius`` auth_type).
    :param radius_secret: Secret for radius (for ``radius`` auth_type).
    :param radius_port: Port for radius (for ``radius`` auth_type).
    """

    def __init__(
        self,
        connections_path: str | None = "connections",
        variables_path: str | None = "variables",
        config_path: str | None = "config",
        url: str | None = None,
        auth_type: str = "token",
        auth_mount_point: str | None = None,
        mount_point: str | None = "secret",
        kv_engine_version: int = 2,
        token: str | None = None,
        token_path: str | None = None,
        username: str | None = None,
        password: str | None = None,
        key_id: str | None = None,
        secret_id: str | None = None,
        role_id: str | None = None,
        assume_role_kwargs: dict | None = None,
        region: str | None = None,
        kubernetes_role: str | None = None,
        kubernetes_jwt_path: str = "/var/run/secrets/kubernetes.io/serviceaccount/token",
        gcp_key_path: str | None = None,
        gcp_keyfile_dict: dict | None = None,
        gcp_scopes: str | None = None,
        azure_tenant_id: str | None = None,
        azure_resource: str | None = None,
        radius_host: str | None = None,
        radius_secret: str | None = None,
        radius_port: int | None = None,
        **kwargs,
    ):
        super().__init__()
        # Strip trailing slashes so build_path() joins cleanly; keep None as-is
        # because None disables that lookup category entirely (see get_* below).
        self.connections_path = connections_path.rstrip("/") if connections_path is not None else None
        self.variables_path = variables_path.rstrip("/") if variables_path is not None else None
        self.config_path = config_path.rstrip("/") if config_path is not None else None
        self.mount_point = mount_point
        self.kv_engine_version = kv_engine_version
        # All Vault access is delegated to the project-internal _VaultClient;
        # this backend only resolves paths and shapes the responses.
        self.vault_client = _VaultClient(
            url=url,
            auth_type=auth_type,
            auth_mount_point=auth_mount_point,
            mount_point=mount_point,
            kv_engine_version=kv_engine_version,
            token=token,
            token_path=token_path,
            username=username,
            password=password,
            key_id=key_id,
            secret_id=secret_id,
            role_id=role_id,
            assume_role_kwargs=assume_role_kwargs,
            region=region,
            kubernetes_role=kubernetes_role,
            kubernetes_jwt_path=kubernetes_jwt_path,
            gcp_key_path=gcp_key_path,
            gcp_keyfile_dict=gcp_keyfile_dict,
            gcp_scopes=gcp_scopes,
            azure_tenant_id=azure_tenant_id,
            azure_resource=azure_resource,
            radius_host=radius_host,
            radius_secret=radius_secret,
            radius_port=radius_port,
            **kwargs,
        )

    def _parse_path(self, secret_path: str) -> tuple[str | None, str | None]:
        # With no static mount_point configured, the first path segment is the
        # mount point and the remainder is the key; a path without a "/" is
        # unresolvable and yields (None, None). With a static mount_point the
        # whole path is the key ("" signals "use the configured mount").
        if not self.mount_point:
            split_secret_path = secret_path.split("/", 1)
            if len(split_secret_path) < 2:
                return None, None
            return split_secret_path[0], split_secret_path[1]
        return "", secret_path

    def get_response(self, conn_id: str) -> dict | None:
        """
        Get data from Vault.

        :return: The data from the Vault path if exists
        """
        mount_point, conn_key = self._parse_path(conn_id)
        # Lookups are disabled when connections_path is None, or when the path
        # could not be parsed into a mount point + key.
        if self.connections_path is None or conn_key is None:
            return None
        if self.connections_path == "":
            secret_path = conn_key
        else:
            secret_path = self.build_path(self.connections_path, conn_key)
        # Re-prefix the dynamic mount point (empty when a static one is set).
        return self.vault_client.get_secret(
            secret_path=(mount_point + "/" if mount_point else "") + secret_path
        )

    # Make sure connection is imported this way for type checking, otherwise when importing
    # the backend it will get a circular dependency and fail
    if TYPE_CHECKING:
        from airflow.models.connection import Connection

    def get_connection(self, conn_id: str) -> Connection | None:
        """
        Get connection from Vault as secret.

        Prioritize conn_uri if exists, if not fall back to normal Connection creation.

        :return: A Connection object constructed from Vault data
        """
        # The Connection needs to be locally imported because otherwise we get into cyclic import
        # problems when instantiating the backend during configuration
        from airflow.models.connection import Connection

        response = self.get_response(conn_id)
        if response is None:
            return None

        # A "conn_uri" key takes precedence; otherwise the secret's keys are
        # passed through as Connection constructor kwargs.
        uri = response.get("conn_uri")
        if uri:
            return Connection(conn_id, uri=uri)
        return Connection(conn_id, **response)

    def get_variable(self, key: str) -> str | None:
        """
        Get Airflow Variable.

        :param key: Variable Key
        :return: Variable Value retrieved from the vault
        """
        mount_point, variable_key = self._parse_path(key)
        if self.variables_path is None or variable_key is None:
            return None
        if self.variables_path == "":
            secret_path = variable_key
        else:
            secret_path = self.build_path(self.variables_path, variable_key)
        response = self.vault_client.get_secret(
            secret_path=(mount_point + "/" if mount_point else "") + secret_path
        )
        # By convention the variable's value lives under the "value" key.
        return response.get("value") if response else None

    def get_config(self, key: str) -> str | None:
        """
        Get Airflow Configuration.

        :param key: Configuration Option Key
        :return: Configuration Option Value retrieved from the vault
        """
        mount_point, config_key = self._parse_path(key)
        if self.config_path is None or config_key is None:
            return None
        if self.config_path == "":
            secret_path = config_key
        else:
            secret_path = self.build_path(self.config_path, config_key)
        response = self.vault_client.get_secret(
            secret_path=(mount_point + "/" if mount_point else "") + secret_path
        )
        # Same convention as variables: the option value is under "value".
        return response.get("value") if response else None
VaultBackend