language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
getsentry__sentry
src/sentry/users/api/serializers/useremail.py
{ "start": 449, "end": 1025 }
class ____(Serializer): def serialize( self, obj: UserEmail, attrs: Mapping[str, Any], user: User | RpcUser | AnonymousUser, **kwargs: Any, ) -> UserEmailSerializerResponse: if isinstance(user, AnonymousUser): raise TypeError("must pass user=... to serialize(...)") primary_email = UserEmail.objects.get_primary_email(user) return { "email": obj.email, "isPrimary": obj.email == primary_email.email, "isVerified": obj.is_verified, }
UserEmailSerializer
python
urllib3__urllib3
src/urllib3/contrib/emscripten/response.py
{ "start": 561, "end": 694 }
class ____: status_code: int headers: dict[str, str] body: IOBase | bytes request: EmscriptenRequest
EmscriptenResponse
python
aio-libs__aiohttp
tests/test_resolver.py
{ "start": 3169, "end": 24045 }
class ____: def __init__(self, host: str) -> None: self.host = host async def fake_aiodns_getaddrinfo_ipv4_result( hosts: Collection[str], ) -> FakeAIODNSAddrInfoIPv4Result: return FakeAIODNSAddrInfoIPv4Result(hosts=hosts) async def fake_aiodns_getaddrinfo_ipv6_result( hosts: Collection[str], ) -> FakeAIODNSAddrInfoIPv6Result: return FakeAIODNSAddrInfoIPv6Result(hosts=hosts) async def fake_aiodns_getnameinfo_ipv6_result( host: str, ) -> FakeAIODNSNameInfoIPv6Result: return FakeAIODNSNameInfoIPv6Result(host) async def fake_query_result(result: Iterable[str]) -> list[FakeQueryResult]: return [FakeQueryResult(host=h) for h in result] def fake_addrinfo(hosts: Collection[str]) -> Callable[..., Awaitable[_AddrInfo4]]: async def fake(*args: Any, **kwargs: Any) -> _AddrInfo4: if not hosts: raise socket.gaierror return [(socket.AF_INET, None, socket.SOCK_STREAM, None, (h, 0)) for h in hosts] return fake def fake_ipv6_addrinfo(hosts: Collection[str]) -> Callable[..., Awaitable[_AddrInfo6]]: async def fake(*args: Any, **kwargs: Any) -> _AddrInfo6: if not hosts: raise socket.gaierror return [ ( socket.AF_INET6, None, socket.SOCK_STREAM, None, (h, 0, 0, 3 if ip_address(h).is_link_local else 0), ) for h in hosts ] return fake def fake_ipv6_nameinfo(host: str) -> Callable[..., Awaitable[tuple[str, int]]]: async def fake(*args: Any, **kwargs: Any) -> tuple[str, int]: return host, 0 return fake @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") @pytest.mark.usefixtures("check_no_lingering_resolvers") async def test_async_resolver_positive_ipv4_lookup( loop: asyncio.AbstractEventLoop, ) -> None: with patch("aiodns.DNSResolver") as mock: mock().getaddrinfo.return_value = fake_aiodns_getaddrinfo_ipv4_result( ["127.0.0.1"] ) resolver = AsyncResolver() real = await resolver.resolve("www.python.org") ipaddress.ip_address(real[0]["host"]) mock().getaddrinfo.assert_called_with( "www.python.org", family=socket.AF_INET, flags=socket.AI_ADDRCONFIG, port=0, 
type=socket.SOCK_STREAM, ) await resolver.close() @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") @pytest.mark.usefixtures("check_no_lingering_resolvers") async def test_async_resolver_positive_link_local_ipv6_lookup( loop: asyncio.AbstractEventLoop, ) -> None: with patch("aiodns.DNSResolver") as mock: mock().getaddrinfo.return_value = fake_aiodns_getaddrinfo_ipv6_result( ["fe80::1"] ) mock().getnameinfo.return_value = fake_aiodns_getnameinfo_ipv6_result( "fe80::1%eth0" ) resolver = AsyncResolver() real = await resolver.resolve("www.python.org") ipaddress.ip_address(real[0]["host"]) mock().getaddrinfo.assert_called_with( "www.python.org", family=socket.AF_INET, flags=socket.AI_ADDRCONFIG, port=0, type=socket.SOCK_STREAM, ) mock().getnameinfo.assert_called_with(("fe80::1", 0, 0, 3), _NAME_SOCKET_FLAGS) await resolver.close() @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") @pytest.mark.usefixtures("check_no_lingering_resolvers") async def test_async_resolver_multiple_replies(loop: asyncio.AbstractEventLoop) -> None: with patch("aiodns.DNSResolver") as mock: ips = ["127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4"] mock().getaddrinfo.return_value = fake_aiodns_getaddrinfo_ipv4_result(ips) resolver = AsyncResolver() real = await resolver.resolve("www.google.com") ipaddrs = [ipaddress.ip_address(x["host"]) for x in real] assert len(ipaddrs) > 3, "Expecting multiple addresses" await resolver.close() @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") @pytest.mark.usefixtures("check_no_lingering_resolvers") async def test_async_resolver_negative_lookup(loop: asyncio.AbstractEventLoop) -> None: with patch("aiodns.DNSResolver") as mock: mock().getaddrinfo.side_effect = aiodns.error.DNSError() resolver = AsyncResolver() with pytest.raises(OSError): await resolver.resolve("doesnotexist.bla") await resolver.close() @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") 
@pytest.mark.usefixtures("check_no_lingering_resolvers") async def test_async_resolver_no_hosts_in_getaddrinfo( loop: asyncio.AbstractEventLoop, ) -> None: with patch("aiodns.DNSResolver") as mock: mock().getaddrinfo.return_value = fake_aiodns_getaddrinfo_ipv4_result([]) resolver = AsyncResolver() with pytest.raises(OSError): await resolver.resolve("doesnotexist.bla") await resolver.close() async def test_threaded_resolver_positive_lookup() -> None: loop = Mock() loop.getaddrinfo = fake_addrinfo(["127.0.0.1"]) resolver = ThreadedResolver() resolver._loop = loop real = await resolver.resolve("www.python.org") assert real[0]["hostname"] == "www.python.org" ipaddress.ip_address(real[0]["host"]) async def test_threaded_resolver_positive_ipv6_link_local_lookup() -> None: loop = Mock() loop.getaddrinfo = fake_ipv6_addrinfo(["fe80::1"]) loop.getnameinfo = fake_ipv6_nameinfo("fe80::1%eth0") # Mock the fake function that was returned by helper functions loop.getaddrinfo = create_autospec(loop.getaddrinfo) loop.getnameinfo = create_autospec(loop.getnameinfo) # Set the correct return values for mock functions loop.getaddrinfo.return_value = await fake_ipv6_addrinfo(["fe80::1"])() loop.getnameinfo.return_value = await fake_ipv6_nameinfo("fe80::1%eth0")() resolver = ThreadedResolver() resolver._loop = loop real = await resolver.resolve("www.python.org") assert real[0]["hostname"] == "www.python.org" ipaddress.ip_address(real[0]["host"]) loop.getaddrinfo.assert_called_with( "www.python.org", 0, type=socket.SOCK_STREAM, family=socket.AF_INET, flags=socket.AI_ADDRCONFIG, ) loop.getnameinfo.assert_called_with(("fe80::1", 0, 0, 3), _NAME_SOCKET_FLAGS) async def test_threaded_resolver_multiple_replies() -> None: loop = Mock() ips = ["127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4"] loop.getaddrinfo = fake_addrinfo(ips) resolver = ThreadedResolver() resolver._loop = loop real = await resolver.resolve("www.google.com") ipaddrs = [ipaddress.ip_address(x["host"]) for x in real] assert 
len(ipaddrs) > 3, "Expecting multiple addresses" async def test_threaded_negative_lookup() -> None: loop = Mock() ips: list[str] = [] loop.getaddrinfo = fake_addrinfo(ips) resolver = ThreadedResolver() resolver._loop = loop with pytest.raises(socket.gaierror): await resolver.resolve("doesnotexist.bla") async def test_threaded_negative_ipv6_lookup() -> None: loop = Mock() ips: list[str] = [] loop.getaddrinfo = fake_ipv6_addrinfo(ips) resolver = ThreadedResolver() resolver._loop = loop with pytest.raises(socket.gaierror): await resolver.resolve("doesnotexist.bla") async def test_threaded_negative_lookup_with_unknown_result() -> None: loop = Mock() # If compile CPython with `--disable-ipv6` option, # we will get an (int, bytes) tuple, instead of a Exception. async def unknown_addrinfo(*args: Any, **kwargs: Any) -> _UnknownAddrInfo: return [ ( socket.AF_INET6, socket.SOCK_STREAM, 6, "", (10, b"\x01\xbb\x00\x00\x00\x00*\x04NB\x00\x1a\x00\x00"), ) ] loop.getaddrinfo = unknown_addrinfo resolver = ThreadedResolver() resolver._loop = loop with patch("socket.has_ipv6", False): res = await resolver.resolve("www.python.org") assert len(res) == 0 async def test_close_for_threaded_resolver(loop: asyncio.AbstractEventLoop) -> None: resolver = ThreadedResolver() await resolver.close() @pytest.mark.skipif(aiodns is None, reason="aiodns required") @pytest.mark.usefixtures("check_no_lingering_resolvers") async def test_close_for_async_resolver(loop: asyncio.AbstractEventLoop) -> None: resolver = AsyncResolver() await resolver.close() async def test_default_loop_for_threaded_resolver( loop: asyncio.AbstractEventLoop, ) -> None: asyncio.set_event_loop(loop) resolver = ThreadedResolver() assert resolver._loop is loop @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") @pytest.mark.usefixtures("check_no_lingering_resolvers") async def test_async_resolver_ipv6_positive_lookup( loop: asyncio.AbstractEventLoop, ) -> None: with patch("aiodns.DNSResolver") as mock: 
mock().getaddrinfo.return_value = fake_aiodns_getaddrinfo_ipv6_result(["::1"]) resolver = AsyncResolver() real = await resolver.resolve("www.python.org") ipaddress.ip_address(real[0]["host"]) mock().getaddrinfo.assert_called_with( "www.python.org", family=socket.AF_INET, flags=socket.AI_ADDRCONFIG, port=0, type=socket.SOCK_STREAM, ) await resolver.close() @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") @pytest.mark.usefixtures("check_no_lingering_resolvers") async def test_async_resolver_error_messages_passed( loop: asyncio.AbstractEventLoop, ) -> None: """Ensure error messages are passed through from aiodns.""" with patch("aiodns.DNSResolver", autospec=True, spec_set=True) as mock: mock().getaddrinfo.side_effect = aiodns.error.DNSError(1, "Test error message") resolver = AsyncResolver() with pytest.raises(OSError, match="Test error message") as excinfo: await resolver.resolve("x.org") assert excinfo.value.strerror == "Test error message" await resolver.close() @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") @pytest.mark.usefixtures("check_no_lingering_resolvers") async def test_async_resolver_error_messages_passed_no_hosts( loop: asyncio.AbstractEventLoop, ) -> None: """Ensure error messages are passed through from aiodns.""" with patch("aiodns.DNSResolver", autospec=True, spec_set=True) as mock: mock().getaddrinfo.return_value = fake_aiodns_getaddrinfo_ipv6_result([]) resolver = AsyncResolver() with pytest.raises(OSError, match="DNS lookup failed") as excinfo: await resolver.resolve("x.org") assert excinfo.value.strerror == "DNS lookup failed" await resolver.close() @pytest.mark.usefixtures("check_no_lingering_resolvers") async def test_async_resolver_aiodns_not_present( loop: asyncio.AbstractEventLoop, monkeypatch: pytest.MonkeyPatch ) -> None: monkeypatch.setattr("aiohttp.resolver.aiodns", None) with pytest.raises(RuntimeError): AsyncResolver() @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") 
@pytest.mark.usefixtures("check_no_lingering_resolvers") def test_aio_dns_is_default() -> None: assert DefaultResolver is AsyncResolver @pytest.mark.skipif(getaddrinfo, reason="aiodns <3.2.0 required") def test_threaded_resolver_is_default() -> None: assert DefaultResolver is ThreadedResolver @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") async def test_dns_resolver_manager_sharing( dns_resolver_manager: _DNSResolverManager, ) -> None: """Test that the DNSResolverManager shares a resolver among AsyncResolver instances.""" # Create two default AsyncResolver instances resolver1 = AsyncResolver() resolver2 = AsyncResolver() # Check that they share the same underlying resolver assert resolver1._resolver is resolver2._resolver # Create an AsyncResolver with custom args resolver3 = AsyncResolver(nameservers=["8.8.8.8"]) # Check that it has its own resolver assert resolver1._resolver is not resolver3._resolver # Cleanup await resolver1.close() await resolver2.close() await resolver3.close() @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") async def test_dns_resolver_manager_singleton( dns_resolver_manager: _DNSResolverManager, ) -> None: """Test that DNSResolverManager is a singleton.""" # Create a second manager and check it's the same instance manager1 = dns_resolver_manager manager2 = _DNSResolverManager() assert manager1 is manager2 @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") async def test_dns_resolver_manager_resolver_lifecycle( dns_resolver_manager: _DNSResolverManager, ) -> None: """Test that DNSResolverManager creates and destroys resolver correctly.""" manager = dns_resolver_manager # Initially there should be no resolvers assert not manager._loop_data # Create a mock AsyncResolver for testing mock_client = Mock(spec=AsyncResolver) mock_client._loop = asyncio.get_running_loop() # Getting resolver should create one mock_loop = mock_client._loop resolver = manager.get_resolver(mock_client, 
mock_loop) assert resolver is not None assert manager._loop_data[mock_loop][0] is resolver # Getting it again should return the same instance assert manager.get_resolver(mock_client, mock_loop) is resolver # Clean up manager.release_resolver(mock_client, mock_loop) assert not manager._loop_data @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") async def test_dns_resolver_manager_client_registration( dns_resolver_manager: _DNSResolverManager, ) -> None: """Test client registration and resolver release logic.""" with patch("aiodns.DNSResolver") as mock: # Create resolver instances resolver1 = AsyncResolver() resolver2 = AsyncResolver() # Both should use the same resolver from the manager assert resolver1._resolver is resolver2._resolver # The manager should be tracking both clients assert resolver1._manager is resolver2._manager manager = resolver1._manager assert manager is not None loop = asyncio.get_running_loop() _, client_set = manager._loop_data[loop] assert len(client_set) == 2 # Close one resolver await resolver1.close() _, client_set = manager._loop_data[loop] assert len(client_set) == 1 # Resolver should still exist assert manager._loop_data # Not empty # Close the second resolver await resolver2.close() assert not manager._loop_data # Should be empty after closing all clients # Now all resolvers should be canceled and removed assert not manager._loop_data # Should be empty mock().cancel.assert_called_once() @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") async def test_dns_resolver_manager_multiple_event_loops( dns_resolver_manager: _DNSResolverManager, ) -> None: """Test that DNSResolverManager correctly manages resolvers across different event loops.""" # Create separate resolvers for each loop resolver1 = Mock(name="resolver1") resolver2 = Mock(name="resolver2") # Create a patch that returns different resolvers based on the loop argument mock_resolver = Mock() mock_resolver.side_effect = lambda loop=None, 
**kwargs: ( resolver1 if loop is asyncio.get_running_loop() else resolver2 ) with patch("aiodns.DNSResolver", mock_resolver): manager = dns_resolver_manager # Create two mock clients on different loops mock_client1 = Mock(spec=AsyncResolver) mock_client1._loop = asyncio.get_running_loop() # Create a second event loop loop2 = Mock(spec=asyncio.AbstractEventLoop) mock_client2 = Mock(spec=AsyncResolver) mock_client2._loop = loop2 # Get resolvers for both clients loop1 = mock_client1._loop loop2 = mock_client2._loop # Get the resolvers through the manager manager_resolver1 = manager.get_resolver(mock_client1, loop1) manager_resolver2 = manager.get_resolver(mock_client2, loop2) # Should be different resolvers for different loops assert manager_resolver1 is resolver1 assert manager_resolver2 is resolver2 assert manager._loop_data[loop1][0] is resolver1 assert manager._loop_data[loop2][0] is resolver2 # Release the first resolver manager.release_resolver(mock_client1, loop1) # First loop's resolver should be gone, but second should remain assert loop1 not in manager._loop_data assert loop2 in manager._loop_data # Release the second resolver manager.release_resolver(mock_client2, loop2) # Both resolvers should be gone assert not manager._loop_data # Verify resolver cleanup resolver1.cancel.assert_called_once() resolver2.cancel.assert_called_once() @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") async def test_dns_resolver_manager_weakref_garbage_collection() -> None: """Test that release_resolver handles None resolver due to weakref garbage collection.""" manager = _DNSResolverManager() # Create a mock resolver that will be None when accessed mock_resolver = Mock() mock_resolver.cancel = Mock() with patch("aiodns.DNSResolver", return_value=mock_resolver): # Create an AsyncResolver to get a resolver from the manager resolver = AsyncResolver() loop = asyncio.get_running_loop() # Manually corrupt the data to simulate garbage collection # by setting the 
resolver to None manager._loop_data[loop] = (None, manager._loop_data[loop][1]) # type: ignore[assignment] # This should not raise an AttributeError: 'NoneType' object has no attribute 'cancel' await resolver.close() # Verify no exception was raised and the loop data was cleaned up properly # Since we set resolver to None and there was one client, the entry should be removed assert loop not in manager._loop_data @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") async def test_dns_resolver_manager_missing_loop_data() -> None: """Test that release_resolver handles missing loop data gracefully.""" manager = _DNSResolverManager() with patch("aiodns.DNSResolver"): # Create an AsyncResolver resolver = AsyncResolver() loop = asyncio.get_running_loop() # Manually remove the loop data to simulate race condition manager._loop_data.clear() # This should not raise a KeyError await resolver.close() # Verify no exception was raised assert loop not in manager._loop_data @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") @pytest.mark.usefixtures("check_no_lingering_resolvers") async def test_async_resolver_close_multiple_times() -> None: """Test that AsyncResolver.close() can be called multiple times without error.""" with patch("aiodns.DNSResolver") as mock_dns_resolver: mock_resolver = Mock() mock_resolver.cancel = Mock() mock_dns_resolver.return_value = mock_resolver # Create a resolver with custom args (dedicated resolver) resolver = AsyncResolver(nameservers=["8.8.8.8"]) # Close it once await resolver.close() mock_resolver.cancel.assert_called_once() # Close it again - should not raise AttributeError await resolver.close() # cancel should still only be called once mock_resolver.cancel.assert_called_once() @pytest.mark.skipif(not getaddrinfo, reason="aiodns >=3.2.0 required") @pytest.mark.usefixtures("check_no_lingering_resolvers") async def test_async_resolver_close_with_none_resolver() -> None: """Test that AsyncResolver.close() 
handles None resolver gracefully.""" with patch("aiodns.DNSResolver"): # Create a resolver with custom args (dedicated resolver) resolver = AsyncResolver(nameservers=["8.8.8.8"]) # Manually set resolver to None to simulate edge case resolver._resolver = None # type: ignore[assignment] # This should not raise AttributeError await resolver.close()
FakeQueryResult
python
xlwings__xlwings
xlwings/constants.py
{ "start": 50934, "end": 51078 }
class ____: xlDataBarFillGradient = 1 # from enum XlDataBarFillType xlDataBarFillSolid = 0 # from enum XlDataBarFillType
DataBarFillType
python
pandas-dev__pandas
pandas/core/resample.py
{ "start": 2338, "end": 60255 }
class ____(BaseGroupBy, PandasObject): """ Class for resampling datetimelike data, a groupby-like operation. See aggregate, transform, and apply functions on this object. It's easiest to use obj.resample(...) to use Resampler. Parameters ---------- obj : Series or DataFrame groupby : TimeGrouper Returns ------- a Resampler of the appropriate type Notes ----- After resampling, see aggregate, apply, and transform functions. """ _grouper: BinGrouper _timegrouper: TimeGrouper binner: DatetimeIndex | TimedeltaIndex | PeriodIndex # depends on subclass exclusions: frozenset[Hashable] = frozenset() # for SelectionMixin compat _internal_names_set = set({"obj", "ax", "_indexer"}) # to the groupby descriptor _attributes = [ "freq", "closed", "label", "convention", "origin", "offset", ] def __init__( self, obj: NDFrame, timegrouper: TimeGrouper, *, gpr_index: Index, group_keys: bool = False, selection=None, include_groups: bool = False, ) -> None: if include_groups: raise ValueError("include_groups=True is no longer allowed.") self._timegrouper = timegrouper self.keys = None self.sort = True self.group_keys = group_keys self.as_index = True self.obj, self.ax, self._indexer = self._timegrouper._set_grouper( self._convert_obj(obj), sort=True, gpr_index=gpr_index ) self.binner, self._grouper = self._get_binner() self._selection = selection if self._timegrouper.key is not None: self.exclusions = frozenset([self._timegrouper.key]) else: self.exclusions = frozenset() @final def __str__(self) -> str: """ Provide a nice str repr of our rolling object. 
""" attrs = ( f"{k}={getattr(self._timegrouper, k)}" for k in self._attributes if getattr(self._timegrouper, k, None) is not None ) return f"{type(self).__name__} [{', '.join(attrs)}]" @final def __getattr__(self, attr: str): if attr in self._internal_names_set: return object.__getattribute__(self, attr) if attr in self._attributes: return getattr(self._timegrouper, attr) if attr in self.obj: return self[attr] return object.__getattribute__(self, attr) @final @property def _from_selection(self) -> bool: """ Is the resampling from a DataFrame column or MultiIndex level. """ # upsampling and PeriodIndex resampling do not work # with selection, this state used to catch and raise an error return self._timegrouper is not None and ( self._timegrouper.key is not None or self._timegrouper.level is not None ) def _convert_obj(self, obj: NDFrameT) -> NDFrameT: """ Provide any conversions for the object in order to correctly handle. Parameters ---------- obj : Series or DataFrame Returns ------- Series or DataFrame """ return obj._consolidate() def _get_binner_for_time(self): raise AbstractMethodError(self) @final def _get_binner(self): """ Create the BinGrouper, assume that self.set_grouper(obj) has already been called. """ binner, bins, binlabels = self._get_binner_for_time() assert len(bins) == len(binlabels) bin_grouper = BinGrouper(bins, binlabels, indexer=self._indexer) return binner, bin_grouper @overload def pipe( self, func: Callable[Concatenate[Self, P], T], *args: P.args, **kwargs: P.kwargs, ) -> T: ... @overload def pipe( self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any, ) -> T: ... @final def pipe( self, func: Callable[Concatenate[Self, P], T] | tuple[Callable[..., T], str], *args: Any, **kwargs: Any, ) -> T: """ Apply a ``func`` with arguments to this Resampler object and return its result. Use `.pipe` when you want to improve readability by chaining together functions that expect Series, DataFrames, GroupBy or Resampler objects. 
Instead of writing >>> h = lambda x, arg2, arg3: x + 1 - arg2 * arg3 >>> g = lambda x, arg1: x * 5 / arg1 >>> f = lambda x: x**4 >>> df = pd.DataFrame([["a", 4], ["b", 5]], columns=["group", "value"]) >>> h(g(f(df.groupby("group")), arg1=1), arg2=2, arg3=3) # doctest: +SKIP You can write >>> ( ... df.groupby("group").pipe(f).pipe(g, arg1=1).pipe(h, arg2=2, arg3=3) ... ) # doctest: +SKIP which is much more readable. Parameters ---------- func : callable or tuple of (callable, str) Function to apply to this Resampler object or, alternatively, a `(callable, data_keyword)` tuple where `data_keyword` is a string indicating the keyword of `callable` that expects the Resampler object. *args : iterable, optional Positional arguments passed into `func`. **kwargs : dict, optional A dictionary of keyword arguments passed into `func`. Returns ------- any The result of applying ``func`` to the Resampler object. See Also -------- Series.pipe : Apply a function with arguments to a series. DataFrame.pipe: Apply a function with arguments to a dataframe. apply : Apply function to each group instead of to the full Resampler object. Notes ----- See more `here <https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#piping-function-calls>`_ Examples -------- >>> df = pd.DataFrame( ... {"A": [1, 2, 3, 4]}, index=pd.date_range("2012-08-02", periods=4) ... ) >>> df A 2012-08-02 1 2012-08-03 2 2012-08-04 3 2012-08-05 4 To get the difference between each 2-day period's maximum and minimum value in one pass, you can do >>> df.resample("2D").pipe(lambda x: x.max() - x.min()) A 2012-08-02 1 2012-08-04 1 """ return super().pipe(func, *args, **kwargs) @final def aggregate(self, func=None, *args, **kwargs): """ Aggregate using one or more operations over the specified axis. Parameters ---------- func : function, str, list or dict Function to use for aggregating the data. If a function, must either work when passed a DataFrame or when passed to DataFrame.apply. 
Accepted combinations are: - function - string function name - list of functions and/or function names, e.g. ``[np.sum, 'mean']`` - dict of axis labels -> functions, function names or list of such. *args Positional arguments to pass to `func`. **kwargs Keyword arguments to pass to `func`. Returns ------- scalar, Series or DataFrame The return can be: * scalar : when Series.agg is called with single function * Series : when DataFrame.agg is called with a single function * DataFrame : when DataFrame.agg is called with several functions See Also -------- DataFrame.groupby.aggregate : Aggregate using callable, string, dict, or list of string/callables. DataFrame.resample.transform : Transforms the Series on each group based on the given function. DataFrame.aggregate: Aggregate using one or more operations over the specified axis. Notes ----- The aggregation operations are always performed over an axis, either the index (default) or the column axis. This behavior is different from `numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`, `var`), where the default is to compute the aggregation of the flattened array, e.g., ``numpy.mean(arr_2d)`` as opposed to ``numpy.mean(arr_2d, axis=0)``. `agg` is an alias for `aggregate`. Use the alias. Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. A passed user-defined-function will be passed a Series for evaluation. If ``func`` defines an index relabeling, ``axis`` must be ``0`` or ``index``. Examples -------- >>> s = pd.Series( ... [1, 2, 3, 4, 5], index=pd.date_range("20130101", periods=5, freq="s") ... 
) >>> s 2013-01-01 00:00:00 1 2013-01-01 00:00:01 2 2013-01-01 00:00:02 3 2013-01-01 00:00:03 4 2013-01-01 00:00:04 5 Freq: s, dtype: int64 >>> r = s.resample("2s") >>> r.agg("sum") 2013-01-01 00:00:00 3 2013-01-01 00:00:02 7 2013-01-01 00:00:04 5 Freq: 2s, dtype: int64 >>> r.agg(["sum", "mean", "max"]) sum mean max 2013-01-01 00:00:00 3 1.5 2 2013-01-01 00:00:02 7 3.5 4 2013-01-01 00:00:04 5 5.0 5 >>> r.agg({"result": lambda x: x.mean() / x.std(), "total": "sum"}) result total 2013-01-01 00:00:00 2.121320 3 2013-01-01 00:00:02 4.949747 7 2013-01-01 00:00:04 NaN 5 >>> r.agg(average="mean", total="sum") average total 2013-01-01 00:00:00 1.5 3 2013-01-01 00:00:02 3.5 7 2013-01-01 00:00:04 5.0 5 """ result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg() if result is None: how = func result = self._groupby_and_aggregate(how, *args, **kwargs) return result agg = aggregate apply = aggregate @final def transform(self, arg, *args, **kwargs): """ Call function producing a like-indexed Series on each group. Return a Series with the transformed values. Parameters ---------- arg : function To apply to each group. Should return a Series with the same index. *args, **kwargs Additional arguments and keywords. Returns ------- Series A Series with the transformed values, maintaining the same index as the original object. See Also -------- core.resample.Resampler.apply : Apply a function along each group. core.resample.Resampler.aggregate : Aggregate using one or more operations over the specified axis. 
Examples -------- >>> s = pd.Series([1, 2], index=pd.date_range("20180101", periods=2, freq="1h")) >>> s 2018-01-01 00:00:00 1 2018-01-01 01:00:00 2 Freq: h, dtype: int64 >>> resampled = s.resample("15min") >>> resampled.transform(lambda x: (x - x.mean()) / x.std()) 2018-01-01 00:00:00 NaN 2018-01-01 01:00:00 NaN Freq: h, dtype: float64 """ return self._selected_obj.groupby(self._timegrouper).transform( arg, *args, **kwargs ) def _downsample(self, how, **kwargs): raise AbstractMethodError(self) def _upsample(self, f, limit: int | None = None, fill_value=None): raise AbstractMethodError(self) def _gotitem(self, key, ndim: int, subset=None): """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on """ grouper = self._grouper if subset is None: subset = self.obj if key is not None: subset = subset[key] else: # reached via Apply.agg_dict_like with selection=None and ndim=1 assert subset.ndim == 1 if ndim == 1: assert subset.ndim == 1 grouped = get_groupby( subset, by=None, grouper=grouper, group_keys=self.group_keys ) return grouped def _groupby_and_aggregate(self, how, *args, **kwargs): """ Re-evaluate the obj with a groupby aggregation. """ grouper = self._grouper # Excludes `on` column when provided obj = self._obj_with_exclusions grouped = get_groupby(obj, by=None, grouper=grouper, group_keys=self.group_keys) try: if callable(how): # TODO: test_resample_apply_with_additional_args fails if we go # through the non-lambda path, not clear that it should. 
func = lambda x: how(x, *args, **kwargs) result = grouped.aggregate(func) else: result = grouped.aggregate(how, *args, **kwargs) except (AttributeError, KeyError): # we have a non-reducing function; try to evaluate # alternatively we want to evaluate only a column of the input # test_apply_to_one_column_of_df the function being applied references # a DataFrame column, but aggregate_item_by_item operates column-wise # on Series, raising AttributeError or KeyError # (depending on whether the column lookup uses getattr/__getitem__) result = grouped.apply(how, *args, **kwargs) except ValueError as err: if "Must produce aggregated value" in str(err): # raised in _aggregate_named # see test_apply_without_aggregation, test_apply_with_mutated_index pass else: raise # we have a non-reducing function # try to evaluate result = grouped.apply(how, *args, **kwargs) return self._wrap_result(result) @final def _get_resampler_for_grouping( self, groupby: GroupBy, key, ): """ Return the correct class for resampling with groupby. """ return self._resampler_for_grouping( groupby=groupby, key=key, parent=self, ) def _wrap_result(self, result): """ Potentially wrap any results. """ if isinstance(result, ABCSeries) and self._selection is not None: result.name = self._selection if isinstance(result, ABCSeries) and result.empty: # When index is all NaT, result is empty but index is not obj = self.obj result.index = _asfreq_compat(obj.index[:0], freq=self.freq) result.name = getattr(obj, "name", None) if self._timegrouper._arrow_dtype is not None: result.index = result.index.astype(self._timegrouper._arrow_dtype) result.index.name = self.obj.index.name return result @final def ffill(self, limit: int | None = None): """ Forward fill the values. This method fills missing values by propagating the last valid observation forward, up to the next valid observation. 
It is commonly used in time series analysis when resampling data to a higher frequency (upsampling) and filling gaps in the resampled output. Parameters ---------- limit : int, optional Limit of how many values to fill. Returns ------- Series The resampled data with missing values filled forward. See Also -------- Series.fillna: Fill NA/NaN values using the specified method. DataFrame.fillna: Fill NA/NaN values using the specified method. Examples -------- Here we only create a ``Series``. >>> ser = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"] ... ), ... ) >>> ser 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 4 dtype: int64 Example for ``ffill`` with downsampling (we have fewer dates after resampling): >>> ser.resample("MS").ffill() 2023-01-01 1 2023-02-01 3 Freq: MS, dtype: int64 Example for ``ffill`` with upsampling (fill the new dates with the previous value): >>> ser.resample("W").ffill() 2023-01-01 1 2023-01-08 1 2023-01-15 2 2023-01-22 2 2023-01-29 2 2023-02-05 3 2023-02-12 3 2023-02-19 4 Freq: W-SUN, dtype: int64 With upsampling and limiting (only fill the first new date with the previous value): >>> ser.resample("W").ffill(limit=1) 2023-01-01 1.0 2023-01-08 1.0 2023-01-15 2.0 2023-01-22 2.0 2023-01-29 NaN 2023-02-05 3.0 2023-02-12 NaN 2023-02-19 4.0 Freq: W-SUN, dtype: float64 """ return self._upsample("ffill", limit=limit) @final def nearest(self, limit: int | None = None): """ Resample by using the nearest value. When resampling data, missing values may appear (e.g., when the resampling frequency is higher than the original frequency). The `nearest` method will replace ``NaN`` values that appeared in the resampled data with the value from the nearest member of the sequence, based on the index value. Missing values that existed in the original data will not be modified. If `limit` is given, fill only this many values in each direction for each of the original values. 
Parameters ---------- limit : int, optional Limit of how many values to fill. Returns ------- Series or DataFrame An upsampled Series or DataFrame with ``NaN`` values filled with their nearest value. See Also -------- bfill : Backward fill the new missing values in the resampled data. ffill : Forward fill ``NaN`` values. Examples -------- >>> s = pd.Series([1, 2], index=pd.date_range("20180101", periods=2, freq="1h")) >>> s 2018-01-01 00:00:00 1 2018-01-01 01:00:00 2 Freq: h, dtype: int64 >>> s.resample("15min").nearest() 2018-01-01 00:00:00 1 2018-01-01 00:15:00 1 2018-01-01 00:30:00 2 2018-01-01 00:45:00 2 2018-01-01 01:00:00 2 Freq: 15min, dtype: int64 Limit the number of upsampled values imputed by the nearest: >>> s.resample("15min").nearest(limit=1) 2018-01-01 00:00:00 1.0 2018-01-01 00:15:00 1.0 2018-01-01 00:30:00 NaN 2018-01-01 00:45:00 2.0 2018-01-01 01:00:00 2.0 Freq: 15min, dtype: float64 """ return self._upsample("nearest", limit=limit) @final def bfill(self, limit: int | None = None): """ Backward fill the new missing values in the resampled data. In statistics, imputation is the process of replacing missing data with substituted values [1]_. When resampling data, missing values may appear (e.g., when the resampling frequency is higher than the original frequency). The backward fill will replace NaN values that appeared in the resampled data with the next value in the original sequence. Missing values that existed in the original data will not be modified. Parameters ---------- limit : int, optional Limit of how many values to fill. Returns ------- Series, DataFrame An upsampled Series or DataFrame with backward filled NaN values. See Also -------- nearest : Fill NaN values with nearest neighbor starting from center. ffill : Forward fill NaN values. Series.fillna : Fill NaN values in the Series using the specified method, which can be 'backfill'. DataFrame.fillna : Fill NaN values in the DataFrame using the specified method, which can be 'backfill'. 
References ---------- .. [1] https://en.wikipedia.org/wiki/Imputation_%28statistics%29 Examples -------- Resampling a Series: >>> s = pd.Series( ... [1, 2, 3], index=pd.date_range("20180101", periods=3, freq="h") ... ) >>> s 2018-01-01 00:00:00 1 2018-01-01 01:00:00 2 2018-01-01 02:00:00 3 Freq: h, dtype: int64 >>> s.resample("30min").bfill() 2018-01-01 00:00:00 1 2018-01-01 00:30:00 2 2018-01-01 01:00:00 2 2018-01-01 01:30:00 3 2018-01-01 02:00:00 3 Freq: 30min, dtype: int64 >>> s.resample("15min").bfill(limit=2) 2018-01-01 00:00:00 1.0 2018-01-01 00:15:00 NaN 2018-01-01 00:30:00 2.0 2018-01-01 00:45:00 2.0 2018-01-01 01:00:00 2.0 2018-01-01 01:15:00 NaN 2018-01-01 01:30:00 3.0 2018-01-01 01:45:00 3.0 2018-01-01 02:00:00 3.0 Freq: 15min, dtype: float64 Resampling a DataFrame that has missing values: >>> df = pd.DataFrame( ... {"a": [2, np.nan, 6], "b": [1, 3, 5]}, ... index=pd.date_range("20180101", periods=3, freq="h"), ... ) >>> df a b 2018-01-01 00:00:00 2.0 1 2018-01-01 01:00:00 NaN 3 2018-01-01 02:00:00 6.0 5 >>> df.resample("30min").bfill() a b 2018-01-01 00:00:00 2.0 1 2018-01-01 00:30:00 NaN 3 2018-01-01 01:00:00 NaN 3 2018-01-01 01:30:00 6.0 5 2018-01-01 02:00:00 6.0 5 >>> df.resample("15min").bfill(limit=2) a b 2018-01-01 00:00:00 2.0 1.0 2018-01-01 00:15:00 NaN NaN 2018-01-01 00:30:00 NaN 3.0 2018-01-01 00:45:00 NaN 3.0 2018-01-01 01:00:00 NaN 3.0 2018-01-01 01:15:00 NaN NaN 2018-01-01 01:30:00 6.0 5.0 2018-01-01 01:45:00 6.0 5.0 2018-01-01 02:00:00 6.0 5.0 """ return self._upsample("bfill", limit=limit) @final def interpolate( self, method: InterpolateOptions = "linear", *, axis: Axis = 0, limit: int | None = None, limit_direction: Literal["forward", "backward", "both"] = "forward", limit_area=None, **kwargs, ): """ Interpolate values between target timestamps according to different methods. 
The original index is first reindexed to target timestamps (see :meth:`core.resample.Resampler.asfreq`), then the interpolation of ``NaN`` values via :meth:`DataFrame.interpolate` happens. Parameters ---------- method : str, default 'linear' Interpolation technique to use. One of: * 'linear': Ignore the index and treat the values as equally spaced. This is the only method supported on MultiIndexes. * 'time': Works on daily and higher resolution data to interpolate given length of interval. * 'index', 'values': use the actual numerical values of the index. * 'pad': Fill in NaNs using existing values. * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'polynomial': Passed to `scipy.interpolate.interp1d`, whereas 'spline' is passed to `scipy.interpolate.UnivariateSpline`. These methods use the numerical values of the index. Both 'polynomial' and 'spline' require that you also specify an `order` (int), e.g. ``df.interpolate(method='polynomial', order=5)``. Note that, `slinear` method in Pandas refers to the Scipy first order `spline` instead of Pandas first order `spline`. * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima', 'cubicspline': Wrappers around the SciPy interpolation methods of similar names. See `Notes`. * 'from_derivatives': Refers to `scipy.interpolate.BPoly.from_derivatives`. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to interpolate along. For `Series` this parameter is unused and defaults to 0. limit : int, optional Maximum number of consecutive NaNs to fill. Must be greater than 0. limit_direction : {{'forward', 'backward', 'both'}}, Optional Consecutive NaNs will be filled in this direction. limit_area : {{`None`, 'inside', 'outside'}}, default None If limit is specified, consecutive NaNs will be filled with this restriction. * ``None``: No fill restriction. * 'inside': Only fill NaNs surrounded by valid values (interpolate). * 'outside': Only fill NaNs outside valid values (extrapolate). 
**kwargs : optional Keyword arguments to pass on to the interpolating function. Returns ------- DataFrame or Series Interpolated values at the specified freq. See Also -------- core.resample.Resampler.asfreq: Return the values at the new freq, essentially a reindex. DataFrame.interpolate: Fill NaN values using an interpolation method. DataFrame.bfill : Backward fill NaN values in the resampled data. DataFrame.ffill : Forward fill NaN values. Notes ----- For high-frequent or non-equidistant time-series with timestamps the reindexing followed by interpolation may lead to information loss as shown in the last example. Examples -------- >>> start = "2023-03-01T07:00:00" >>> timesteps = pd.date_range(start, periods=5, freq="s") >>> series = pd.Series(data=[1, -1, 2, 1, 3], index=timesteps) >>> series 2023-03-01 07:00:00 1 2023-03-01 07:00:01 -1 2023-03-01 07:00:02 2 2023-03-01 07:00:03 1 2023-03-01 07:00:04 3 Freq: s, dtype: int64 Downsample the dataframe to 0.5Hz by providing the period time of 2s. >>> series.resample("2s").interpolate("linear") 2023-03-01 07:00:00 1 2023-03-01 07:00:02 2 2023-03-01 07:00:04 3 Freq: 2s, dtype: int64 Upsample the dataframe to 2Hz by providing the period time of 500ms. >>> series.resample("500ms").interpolate("linear") 2023-03-01 07:00:00.000 1.0 2023-03-01 07:00:00.500 0.0 2023-03-01 07:00:01.000 -1.0 2023-03-01 07:00:01.500 0.5 2023-03-01 07:00:02.000 2.0 2023-03-01 07:00:02.500 1.5 2023-03-01 07:00:03.000 1.0 2023-03-01 07:00:03.500 2.0 2023-03-01 07:00:04.000 3.0 Freq: 500ms, dtype: float64 Internal reindexing with ``asfreq()`` prior to interpolation leads to an interpolated timeseries on the basis of the reindexed timestamps (anchors). 
It is assured that all available datapoints from original series become anchors, so it also works for resampling-cases that lead to non-aligned timestamps, as in the following example: >>> series.resample("400ms").interpolate("linear") 2023-03-01 07:00:00.000 1.000000 2023-03-01 07:00:00.400 0.333333 2023-03-01 07:00:00.800 -0.333333 2023-03-01 07:00:01.200 0.000000 2023-03-01 07:00:01.600 1.000000 2023-03-01 07:00:02.000 2.000000 2023-03-01 07:00:02.400 1.666667 2023-03-01 07:00:02.800 1.333333 2023-03-01 07:00:03.200 1.666667 2023-03-01 07:00:03.600 2.333333 2023-03-01 07:00:04.000 3.000000 Freq: 400ms, dtype: float64 Note that the series correctly decreases between two anchors ``07:00:00`` and ``07:00:02``. """ if "inplace" in kwargs: # GH#58690 warnings.warn( f"The 'inplace' keyword in {type(self).__name__}.interpolate " "is deprecated and will be removed in a future version. " "resample(...).interpolate is never inplace.", Pandas4Warning, stacklevel=find_stack_level(), ) inplace = kwargs.pop("inplace") if inplace: raise ValueError("Cannot interpolate inplace on a resampled object.") result = self._upsample("asfreq") # If the original data has timestamps which are not aligned with the # target timestamps, we need to add those points back to the data frame # that is supposed to be interpolated. This does not work with # PeriodIndex, so we skip this case. GH#21351 obj = self._selected_obj is_period_index = isinstance(obj.index, PeriodIndex) # Skip this step for PeriodIndex if not is_period_index: final_index = result.index if isinstance(final_index, MultiIndex): raise NotImplementedError( "Direct interpolation of MultiIndex data frames is not " "supported. If you tried to resample and interpolate on a " "grouped data frame, please use:\n" "`df.groupby(...).apply(lambda x: x.resample(...)." "interpolate(...))`" "\ninstead, as resampling and interpolation has to be " "performed for each group independently." 
) missing_data_points_index = obj.index.difference(final_index) if len(missing_data_points_index) > 0: result = concat( [result, obj.loc[missing_data_points_index]] ).sort_index() result_interpolated = result.interpolate( method=method, axis=axis, limit=limit, inplace=False, limit_direction=limit_direction, limit_area=limit_area, **kwargs, ) # No further steps if the original data has a PeriodIndex if is_period_index: return result_interpolated # Make sure that original data points which do not align with the # resampled index are removed result_interpolated = result_interpolated.loc[final_index] # Make sure frequency indexes are preserved result_interpolated.index = final_index return result_interpolated @final def asfreq(self, fill_value=None): """ Return the values at the new freq, essentially a reindex. Parameters ---------- fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). Returns ------- DataFrame or Series Values at the specified freq. See Also -------- Series.asfreq: Convert TimeSeries to specified frequency. DataFrame.asfreq: Convert TimeSeries to specified frequency. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... ["2023-01-01", "2023-01-31", "2023-02-01", "2023-02-28"] ... ), ... ) >>> ser 2023-01-01 1 2023-01-31 2 2023-02-01 3 2023-02-28 4 dtype: int64 >>> ser.resample("MS").asfreq() 2023-01-01 1 2023-02-01 3 Freq: MS, dtype: int64 """ return self._upsample("asfreq", fill_value=fill_value) @final def sum( self, numeric_only: bool = False, min_count: int = 0, ): """ Compute sum of group values. This method provides a simple way to compute the sum of values within each resampled group, particularly useful for aggregating time-based data into daily, monthly, or yearly sums. Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. .. 
versionchanged:: 2.0.0 numeric_only no longer accepts ``None``. min_count : int, default 0 The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA. Returns ------- Series or DataFrame Computed sum of values within each group. See Also -------- core.resample.Resampler.mean : Compute mean of groups, excluding missing values. core.resample.Resampler.count : Compute count of group, excluding missing values. DataFrame.resample : Resample time-series data. Series.sum : Return the sum of the values over the requested axis. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"] ... ), ... ) >>> ser 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 4 dtype: int64 >>> ser.resample("MS").sum() 2023-01-01 3 2023-02-01 7 Freq: MS, dtype: int64 """ return self._downsample("sum", numeric_only=numeric_only, min_count=min_count) @final def prod( self, numeric_only: bool = False, min_count: int = 0, ): """ Compute prod of group values. Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. .. versionchanged:: 2.0.0 numeric_only no longer accepts ``None``. min_count : int, default 0 The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA. Returns ------- Series or DataFrame Computed prod of values within each group. See Also -------- core.resample.Resampler.sum : Compute sum of groups, excluding missing values. core.resample.Resampler.mean : Compute mean of groups, excluding missing values. core.resample.Resampler.median : Compute median of groups, excluding missing values. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"] ... ), ... 
) >>> ser 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 4 dtype: int64 >>> ser.resample("MS").prod() 2023-01-01 2 2023-02-01 12 Freq: MS, dtype: int64 """ return self._downsample("prod", numeric_only=numeric_only, min_count=min_count) @final def min( self, numeric_only: bool = False, min_count: int = 0, ): """ Compute min value of group. Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. .. versionchanged:: 2.0.0 numeric_only no longer accepts ``None``. min_count : int, default 0 The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA. Returns ------- Series or DataFrame Compute the minimum value in the given Series or DataFrame. See Also -------- core.resample.Resampler.max : Compute max value of group. core.resample.Resampler.mean : Compute mean of groups, excluding missing values. core.resample.Resampler.median : Compute median of groups, excluding missing values. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"] ... ), ... ) >>> ser 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 4 dtype: int64 >>> ser.resample("MS").min() 2023-01-01 1 2023-02-01 3 Freq: MS, dtype: int64 """ return self._downsample("min", numeric_only=numeric_only, min_count=min_count) @final def max( self, numeric_only: bool = False, min_count: int = 0, ): """ Compute max value of group. Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. .. versionchanged:: 2.0.0 numeric_only no longer accepts ``None``. min_count : int, default 0 The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA. Returns ------- Series or DataFrame Computes the maximum value in the given Series or Dataframe. 
See Also -------- core.resample.Resampler.min : Compute min value of group. core.resample.Resampler.mean : Compute mean of groups, excluding missing values. core.resample.Resampler.median : Compute median of groups, excluding missing values. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"] ... ), ... ) >>> ser 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 4 dtype: int64 >>> ser.resample("MS").max() 2023-01-01 2 2023-02-01 4 Freq: MS, dtype: int64 """ return self._downsample("max", numeric_only=numeric_only, min_count=min_count) @final def first( self, numeric_only: bool = False, min_count: int = 0, skipna: bool = True, ): """ Compute the first non-null entry of each column. Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. min_count : int, default 0 The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA. skipna : bool, default True Exclude NA/null values. If an entire group is NA, the result will be NA. Returns ------- Series or DataFrame First values within each group. See Also -------- core.resample.Resampler.last : Compute the last non-null value in each group. core.resample.Resampler.mean : Compute mean of groups, excluding missing values. Examples -------- >>> s = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"] ... ), ... ) >>> s 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 4 dtype: int64 >>> s.resample("MS").first() 2023-01-01 1 2023-02-01 3 Freq: MS, dtype: int64 """ return self._downsample( "first", numeric_only=numeric_only, min_count=min_count, skipna=skipna ) @final def last( self, numeric_only: bool = False, min_count: int = 0, skipna: bool = True, ): """ Compute the last non-null entry of each column. 
Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. min_count : int, default 0 The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA. skipna : bool, default True Exclude NA/null values. If an entire group is NA, the result will be NA. Returns ------- Series or DataFrame Last of values within each group. See Also -------- core.resample.Resampler.first : Compute the first non-null value in each group. core.resample.Resampler.mean : Compute mean of groups, excluding missing values. Examples -------- >>> s = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"] ... ), ... ) >>> s 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 4 dtype: int64 >>> s.resample("MS").last() 2023-01-01 2 2023-02-01 4 Freq: MS, dtype: int64 """ return self._downsample( "last", numeric_only=numeric_only, min_count=min_count, skipna=skipna ) @final def median(self, numeric_only: bool = False): """ Compute median of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. .. versionchanged:: 2.0.0 numeric_only no longer accepts ``None`` and defaults to False. Returns ------- Series or DataFrame Median of values within each group. See Also -------- Series.groupby : Apply a function groupby to a Series. DataFrame.groupby : Apply a function groupby to each row or column of a DataFrame. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 3, 4, 5], ... index=pd.DatetimeIndex( ... [ ... "2023-01-01", ... "2023-01-10", ... "2023-01-15", ... "2023-02-01", ... "2023-02-10", ... "2023-02-15", ... ] ... ), ... 
) >>> ser.resample("MS").median() 2023-01-01 2.0 2023-02-01 4.0 Freq: MS, dtype: float64 """ return self._downsample("median", numeric_only=numeric_only) @final def mean( self, numeric_only: bool = False, ): """ Compute mean of groups, excluding missing values. Parameters ---------- numeric_only : bool, default False Include only `float`, `int` or `boolean` data. .. versionchanged:: 2.0.0 numeric_only now defaults to ``False``. Returns ------- DataFrame or Series Mean of values within each group. See Also -------- core.resample.Resampler.median : Compute median of groups, excluding missing values. core.resample.Resampler.sum : Compute sum of groups, excluding missing values. core.resample.Resampler.std : Compute standard deviation of groups, excluding missing values. core.resample.Resampler.var : Compute variance of groups, excluding missing values. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"] ... ), ... ) >>> ser 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 4 dtype: int64 >>> ser.resample("MS").mean() 2023-01-01 1.5 2023-02-01 3.5 Freq: MS, dtype: float64 """ return self._downsample("mean", numeric_only=numeric_only) @final def std( self, ddof: int = 1, numeric_only: bool = False, ): """ Compute standard deviation of groups, excluding missing values. Parameters ---------- ddof : int, default 1 Degrees of freedom. numeric_only : bool, default False Include only `float`, `int` or `boolean` data. .. versionchanged:: 2.0.0 numeric_only now defaults to ``False``. Returns ------- DataFrame or Series Standard deviation of values within each group. See Also -------- core.resample.Resampler.mean : Compute mean of groups, excluding missing values. core.resample.Resampler.median : Compute median of groups, excluding missing values. core.resample.Resampler.var : Compute variance of groups, excluding missing values. Examples -------- >>> ser = pd.Series( ... 
[1, 3, 2, 4, 3, 8], ... index=pd.DatetimeIndex( ... [ ... "2023-01-01", ... "2023-01-10", ... "2023-01-15", ... "2023-02-01", ... "2023-02-10", ... "2023-02-15", ... ] ... ), ... ) >>> ser.resample("MS").std() 2023-01-01 1.000000 2023-02-01 2.645751 Freq: MS, dtype: float64 """ return self._downsample("std", ddof=ddof, numeric_only=numeric_only) @final def var( self, ddof: int = 1, numeric_only: bool = False, ): """ Compute variance of groups, excluding missing values. Parameters ---------- ddof : int, default 1 Degrees of freedom. numeric_only : bool, default False Include only `float`, `int` or `boolean` data. .. versionchanged:: 2.0.0 numeric_only now defaults to ``False``. Returns ------- DataFrame or Series Variance of values within each group. See Also -------- core.resample.Resampler.std : Compute standard deviation of groups, excluding missing values. core.resample.Resampler.mean : Compute mean of groups, excluding missing values. core.resample.Resampler.median : Compute median of groups, excluding missing values. Examples -------- >>> ser = pd.Series( ... [1, 3, 2, 4, 3, 8], ... index=pd.DatetimeIndex( ... [ ... "2023-01-01", ... "2023-01-10", ... "2023-01-15", ... "2023-02-01", ... "2023-02-10", ... "2023-02-15", ... ] ... ), ... ) >>> ser.resample("MS").var() 2023-01-01 1.0 2023-02-01 7.0 Freq: MS, dtype: float64 >>> ser.resample("MS").var(ddof=0) 2023-01-01 0.666667 2023-02-01 4.666667 Freq: MS, dtype: float64 """ return self._downsample("var", ddof=ddof, numeric_only=numeric_only) @final def sem( self, ddof: int = 1, numeric_only: bool = False, ): """ Compute standard error of the mean of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : int, default 1 Degrees of freedom. numeric_only : bool, default False Include only `float`, `int` or `boolean` data. .. versionchanged:: 2.0.0 numeric_only now defaults to ``False``. 
Returns ------- Series or DataFrame Standard error of the mean of values within each group. See Also -------- DataFrame.sem : Return unbiased standard error of the mean over requested axis. Series.sem : Return unbiased standard error of the mean over requested axis. Examples -------- >>> ser = pd.Series( ... [1, 3, 2, 4, 3, 8], ... index=pd.DatetimeIndex( ... [ ... "2023-01-01", ... "2023-01-10", ... "2023-01-15", ... "2023-02-01", ... "2023-02-10", ... "2023-02-15", ... ] ... ), ... ) >>> ser.resample("MS").sem() 2023-01-01 0.577350 2023-02-01 1.527525 Freq: MS, dtype: float64 """ return self._downsample("sem", ddof=ddof, numeric_only=numeric_only) @final def ohlc(self): """ Compute open, high, low and close values of a group, excluding missing values. Returns ------- DataFrame Open, high, low and close values within each group. See Also -------- DataFrame.agg : Aggregate using one or more operations over the specified axis. DataFrame.resample : Resample time-series data. DataFrame.groupby : Group DataFrame using a mapper or by a Series of columns. Examples -------- >>> ser = pd.Series( ... [1, 3, 2, 4, 3, 5], ... index=pd.DatetimeIndex( ... [ ... "2023-01-01", ... "2023-01-10", ... "2023-01-15", ... "2023-02-01", ... "2023-02-10", ... "2023-02-15", ... ] ... ), ... ) >>> ser.resample("MS").ohlc() open high low close 2023-01-01 1 3 1 2 2023-02-01 4 5 3 5 """ ax = self.ax obj = self._obj_with_exclusions if len(ax) == 0: # GH#42902 obj = obj.copy() obj.index = _asfreq_compat(obj.index, self.freq) if obj.ndim == 1: obj = obj.to_frame() obj = obj.reindex(["open", "high", "low", "close"], axis=1) else: mi = MultiIndex.from_product( [obj.columns, ["open", "high", "low", "close"]] ) obj = obj.reindex(mi, axis=1) return obj return self._downsample("ohlc") @final def nunique(self): """ Return number of unique elements in the group. Returns ------- Series Number of unique values within each group. 
See Also -------- core.groupby.SeriesGroupBy.nunique : Method nunique for SeriesGroupBy. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 3], ... index=pd.DatetimeIndex( ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"] ... ), ... ) >>> ser 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 3 dtype: int64 >>> ser.resample("MS").nunique() 2023-01-01 2 2023-02-01 1 Freq: MS, dtype: int64 """ return self._downsample("nunique") @final def size(self): """ Compute group sizes. Returns ------- Series Number of rows in each group. See Also -------- Series.groupby : Apply a function groupby to a Series. DataFrame.groupby : Apply a function groupby to each row or column of a DataFrame. Examples -------- >>> ser = pd.Series( ... [1, 2, 3], ... index=pd.DatetimeIndex(["2023-01-01", "2023-01-15", "2023-02-01"]), ... ) >>> ser 2023-01-01 1 2023-01-15 2 2023-02-01 3 dtype: int64 >>> ser.resample("MS").size() 2023-01-01 2 2023-02-01 1 Freq: MS, dtype: int64 """ result = self._downsample("size") # If the result is a non-empty DataFrame we stack to get a Series # GH 46826 if isinstance(result, ABCDataFrame) and not result.empty: result = result.stack() if not len(self.ax): from pandas import Series if self._selected_obj.ndim == 1: name = self._selected_obj.name else: name = None result = Series([], index=result.index, dtype="int64", name=name) return result @final def count(self): """ Compute count of group, excluding missing values. Returns ------- Series or DataFrame Count of values within each group. See Also -------- Series.groupby : Apply a function groupby to a Series. DataFrame.groupby : Apply a function groupby to each row or column of a DataFrame. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"] ... ), ... 
) >>> ser 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 4 dtype: int64 >>> ser.resample("MS").count() 2023-01-01 2 2023-02-01 2 Freq: MS, dtype: int64 """ result = self._downsample("count") if not len(self.ax): if self._selected_obj.ndim == 1: result = type(self._selected_obj)( [], index=result.index, dtype="int64", name=self._selected_obj.name ) else: from pandas import DataFrame result = DataFrame( [], index=result.index, columns=result.columns, dtype="int64" ) return result @final def quantile(self, q: float | list[float] | AnyArrayLike = 0.5, **kwargs): """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Returns ------- DataFrame or Series Quantile of values within each group. See Also -------- Series.quantile Return a series, where the index is q and the values are the quantiles. DataFrame.quantile Return a DataFrame, where the columns are the columns of self, and the values are the quantiles. DataFrameGroupBy.quantile Return a DataFrame, where the columns are groupby columns, and the values are its quantiles. Examples -------- >>> ser = pd.Series( ... [1, 3, 2, 4, 3, 8], ... index=pd.DatetimeIndex( ... [ ... "2023-01-01", ... "2023-01-10", ... "2023-01-15", ... "2023-02-01", ... "2023-02-10", ... "2023-02-15", ... ] ... ), ... ) >>> ser.resample("MS").quantile() 2023-01-01 2.0 2023-02-01 4.0 Freq: MS, dtype: float64 >>> ser.resample("MS").quantile(0.25) 2023-01-01 1.5 2023-02-01 3.5 Freq: MS, dtype: float64 """ return self._downsample("quantile", q=q, **kwargs)
Resampler
python
huggingface__transformers
tests/models/pegasus_x/test_modeling_pegasus_x.py
{ "start": 7785, "end": 23548 }
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (PegasusXModel, PegasusXForConditionalGeneration) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": PegasusXModel, "summarization": PegasusXForConditionalGeneration, "text2text-generation": PegasusXForConditionalGeneration, "translation": PegasusXForConditionalGeneration, } if is_torch_available() else {} ) is_encoder_decoder = True test_missing_keys = False def setUp(self): self.model_tester = PegasusXModelTester(self) self.config_tester = ConfigTester(self, config_class=PegasusXConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], set()) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (PegasusXModel, PegasusXForConditionalGeneration): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", 
encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = PegasusXForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions 
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0]["local"].shape[-4:]), [ self.model_tester.num_attention_heads, math.ceil(encoder_seq_length / model.config.block_size), model.config.block_size, model.config.block_size + model.config.num_global_tokens, ], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = 
model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0]["local"].shape[-4:]), [ self.model_tester.num_attention_heads, math.ceil(encoder_seq_length / model.config.block_size), model.config.block_size, model.config.block_size + model.config.num_global_tokens, ], ) def _check_encoder_attention_for_generate(self, attentions, batch_size, config, prompt_length): encoder_expected_shape = ( batch_size, config.num_attention_heads, math.ceil(prompt_length / config.block_size), config.block_size, config.block_size + config.num_global_tokens, ) self.assertIsInstance(attentions, tuple) self.assertListEqual( [layer_attentions["local"].shape for layer_attentions in attentions], [encoder_expected_shape] * len(attentions), ) def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, prompt_length): encoder_expected_shape = (batch_size, self.round_up(prompt_length, config.block_size), config.hidden_size) self.assertIsInstance(hidden_states, tuple) # Only the last layer will have the hidden states truncated back to token level self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in hidden_states[:-1]], [encoder_expected_shape] * (len(hidden_states) - 1), ) # Only the last layer will have the hidden states truncated back to token level self.assertEqual( hidden_states[-1][0].shape, (batch_size, prompt_length, config.hidden_size), ) def test_hidden_states_output(self): def _check_hidden_states_output(inputs_dict, config, model_class): model = 
model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.round_up(seq_length, config.block_size), self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True _check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True _check_hidden_states_output(inputs_dict, config, model_class) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions # no need to test all models as different heads yield the same 
functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] if config.is_encoder_decoder: # Seq2Seq models encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() decoder_hidden_states = outputs.decoder_hidden_states[0] decoder_hidden_states.retain_grad() if self.has_attentions: encoder_attentions = outputs.encoder_attentions[0] encoder_attentions["local"].retain_grad() encoder_attentions["global"].retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(decoder_hidden_states.grad) if self.has_attentions: self.assertIsNotNone(encoder_attentions["local"].grad) self.assertIsNotNone(encoder_attentions["global"].grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) else: # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: attentions = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) @classmethod def round_up(cls, n, k): return math.ceil(n / k) * k def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise Exception except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent 
different." else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) TOLERANCE = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow
PegasusXModelTest
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_hyperlink19.py
{ "start": 370, "end": 1253 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("hyperlink19.xlsx") self.ignore_files = [ "xl/calcChain.xml", "[Content_Types].xml", "xl/_rels/workbook.xml.rels", ] def test_create_file(self): """Test the creation of a simple XlsxWriter file with hyperlinks.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.write_url("A1", "http://www.perl.com/") # Maintain the link but overwrite string with a formula. worksheet.write_formula("A1", "=1+1", None, 2) # Reset the SST for testing. workbook.str_table = SharedStringTable() workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
mlflow__mlflow
mlflow/server/auth/db/models.py
{ "start": 1356, "end": 1948 }
class ____(Base): __tablename__ = "experiment_permissions" id = Column(Integer(), primary_key=True) experiment_id = Column(String(255), nullable=False) user_id = Column(Integer, ForeignKey("users.id"), nullable=False) permission = Column(String(255)) __table_args__ = (UniqueConstraint("experiment_id", "user_id", name="unique_experiment_user"),) def to_mlflow_entity(self): return ExperimentPermission( experiment_id=self.experiment_id, user_id=self.user_id, permission=self.permission, )
SqlExperimentPermission
python
scrapy__scrapy
tests/test_utils_signal.py
{ "start": 3363, "end": 3631 }
class ____(TestSendCatchLogAsync): async def ok_handler(self, arg, handlers_called): handlers_called.add(self.ok_handler) assert arg == "test" await defer.succeed(42) return "OK" @pytest.mark.only_asyncio
TestSendCatchLogAsyncAsyncDef
python
numba__numba
numba/tests/test_debug.py
{ "start": 618, "end": 1120 }
class ____(object): def __init__(self): self.h = 5 simple_class_spec = [('h', types.int32)] def simple_class_user(obj): return obj.h def unsupported_parfor(a, b): return np.dot(a, b) # dot as gemm unsupported def supported_parfor(n): a = np.ones(n) for i in prange(n): a[i] = a[i] + np.sin(i) return a def unsupported_prange(n): a = np.ones(n) for i in prange(n): a[i] = a[i] + np.sin(i) assert i + 13 < 100000 return a
SimpleClass
python
psf__black
tests/data/cases/class_blank_parentheses.py
{ "start": 184, "end": 423 }
class ____(object): def func_with_blank_parentheses(): return 5 def public_func_with_blank_parentheses(): return None def class_under_the_func_with_blank_parentheses(): class InsideFunc(): pass
ClassWithEmptyFunc
python
pytorch__pytorch
test/distributed/_composable/fsdp/test_fully_shard_init.py
{ "start": 1527, "end": 2983 }
class ____(FSDPTestMultiThread): """Tests that tensor parameters are moved to the expected device.""" @property def world_size(self) -> int: return 1 @skip_if_lt_x_gpu(1) def test_move_states_to_device_tensor(self): model = MLP(8, torch.device("cpu"), with_buffer=True) for tensor in itertools.chain(model.parameters(), model.buffers()): self.assertEqual(tensor.device, torch.device("cpu")) fully_shard(model) accelerator_device = torch.device( device_type.type, torch.get_device_module(device_type).current_device() ) for tensor in itertools.chain(model.parameters(), model.buffers()): self.assertEqual(tensor.device, accelerator_device) @skip_if_lt_x_gpu(1) def test_move_states_to_device_ignored_param_device(self): cpu_device = torch.device("cpu") model = MLP(8, cpu_device, with_buffer=True) ignored_params = [model.out_proj.weight, model.out_proj.bias] fully_shard(model, ignored_params=set(ignored_params)) for tensor in ignored_params: self.assertEqual(tensor.device, cpu_device) accelerator_device = torch.device( device_type.type, torch.get_device_module(device_type).current_device() ) model.to(device_type) for tensor in ignored_params: self.assertEqual(tensor.device, accelerator_device)
TestFullyShardDeviceTensor
python
pydantic__pydantic
pydantic/_internal/_discriminated_union.py
{ "start": 414, "end": 2429 }
class ____(Exception): """Raised when applying a discriminated union discriminator to a schema requires a definition that is not yet defined """ def __init__(self, ref: str) -> None: self.ref = ref super().__init__(f'Missing definition for ref {self.ref!r}') def set_discriminator_in_metadata(schema: CoreSchema, discriminator: Any) -> None: metadata = cast('CoreMetadata', schema.setdefault('metadata', {})) metadata['pydantic_internal_union_discriminator'] = discriminator def apply_discriminator( schema: core_schema.CoreSchema, discriminator: str | Discriminator, definitions: dict[str, core_schema.CoreSchema] | None = None, ) -> core_schema.CoreSchema: """Applies the discriminator and returns a new core schema. Args: schema: The input schema. discriminator: The name of the field which will serve as the discriminator. definitions: A mapping of schema ref to schema. Returns: The new core schema. Raises: TypeError: - If `discriminator` is used with invalid union variant. - If `discriminator` is used with `Union` type with one variant. - If `discriminator` value mapped to multiple choices. MissingDefinitionForUnionRef: If the definition for ref is missing. PydanticUserError: - If a model in union doesn't have a discriminator field. - If discriminator field has a non-string alias. - If discriminator fields have different aliases. - If discriminator field not of type `Literal`. """ from ..types import Discriminator if isinstance(discriminator, Discriminator): if isinstance(discriminator.discriminator, str): discriminator = discriminator.discriminator else: return discriminator._convert_schema(schema) return _ApplyInferredDiscriminator(discriminator, definitions or {}).apply(schema)
MissingDefinitionForUnionRef
python
geekcomputers__Python
blackJackGUI.py
{ "start": 2360, "end": 4395 }
class ____: def __init__(self): self.Deck = [Card(suit, rank) for suit in SUITS for rank in RANKS] def shuffle(self): random.shuffle(self.Deck) def deal_card(self): return random.choice(self.Deck) def __str__(self): return string_list_join("Deck", self.Deck) def deal(): global outcome, in_play, score1, score2, player_card, dealer_card, deck outcome = "" player_card = Hand() dealer_card = Hand() deck = Deck() for i in range(2): player_card.add_card(deck.deal_card()) dealer_card.add_card(deck.deal_card()) in_play = True score1 = str(player_card.get_value()) score2 = str(dealer_card.get_value()) def stand(): if in_play == True: while dealer_card.get_value() < 17: dealer_card.add_card(deck.deal_card()) if dealer_card.get_value() > 21: outcome = "you won!!" elif player_card.get_value() <= dealer_card.get_value(): outcome = "you lose" else: outcome = "you won!!" score1 = str(player_card.get_value()) score2 = str(dealer_card.get_value()) def hit(): global outcome, in_play, score1, score2, player_card, dealer_card, deck if in_play == True: player_card.add_card(deck.deal_card()) if player_card.get_value() > 21: outcome = "you are busted" in_play = False score1 = str(player_card.get_value()) score2 = str(dealer_card.get_value()) def draw(canvas): canvas.draw_text(outcome, [250, 150], 25, "White") canvas.draw_text("BlackJack", [250, 50], 40, "Black") canvas.draw_text(score1, [100, 100], 40, "Red") player_card.draw(canvas, [20, 300]) dealer_card.draw(canvas, [300, 300]) canvas.draw_text(score2, [400, 100], 40, "Red") frame = simplegui.create_frame("Blackjack", 600, 600) frame.set_canvas_background("Green") frame.add_button("Deal", deal, 200) frame.add_button("Hit", hit, 200) frame.add_button("Stand", stand, 200) frame.set_draw_handler(draw) deal() frame.start()
Deck
python
walkccc__LeetCode
solutions/2971. Find Polygon With the Largest Perimeter/2971.py
{ "start": 0, "end": 347 }
class ____: def largestPerimeter(self, nums: list[int]) -> int: prefix = sum(nums) for num in sorted(nums, reverse=True): prefix -= num # Let `num` be the longest side. Check if the sum of all the edges with # length no longer than `num` > `num``. if prefix > num: return prefix + num return -1
Solution
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/array_ops/array_ops_test.py
{ "start": 34026, "end": 39302 }
class ____(test_util.TensorFlowTestCase): """Test the shape inference of StridedSliceShapes.""" def testUnknown(self): with test_util.device(use_gpu=True): @def_function.function def f(x): y = x[...] self.assertAllEqual(y.get_shape().ndims, None) _ = f.get_concrete_function(tensor_lib.TensorSpec(None, dtypes.float32)) def testScalarInput(self): c = constant_op.constant(3) with self.assertRaisesRegex( (ValueError, errors.InvalidArgumentError), "Attempting to slice scalar input.", ): array_ops.strided_slice(c, [0], [1]) def tensorShapeEqual(self, x, y): self.assertTrue(x is not None and y is not None or x is None and y is None) self.assertEqual(x.as_list(), y.as_list()) def testTensorShapeUncertain(self): with test_util.device(use_gpu=True): @def_function.function def f1(x): y = x[3:5] self.tensorShapeEqual(y.get_shape(), tensor_shape.TensorShape([2, None, 7])) _ = f1.get_concrete_function( tensor_lib.TensorSpec((5, None, 7), dtypes.float32)) @def_function.function def f2(x): y = x[3:5, :, 4] self.tensorShapeEqual(y.get_shape(), tensor_shape.TensorShape([2, None])) _ = f2.get_concrete_function( tensor_lib.TensorSpec((5, None, 7), dtypes.float32)) @def_function.function def f3(x): y = x[3:5, 3:4, 4] self.tensorShapeEqual(y.get_shape(), tensor_shape.TensorShape([2, None])) _ = f3.get_concrete_function( tensor_lib.TensorSpec((5, None, 7), dtypes.float32)) @def_function.function def f4(x): y = x[3:5, :, 5:10] self.tensorShapeEqual(y.get_shape(), tensor_shape.TensorShape([2, None, 2])) _ = f4.get_concrete_function( tensor_lib.TensorSpec((5, None, 7), dtypes.float32)) @def_function.function def f5(x): y = x[3:5, :, 50:3] self.tensorShapeEqual(y.get_shape(), tensor_shape.TensorShape([2, None, 0])) _ = f5.get_concrete_function( tensor_lib.TensorSpec((5, None, 7), dtypes.float32)) @def_function.function def f6(x): y = x[3:5, :, array_ops.newaxis, 50:3,] self.tensorShapeEqual(y.get_shape(), tensor_shape.TensorShape([2, None, 1, 0])) _ = f6.get_concrete_function( 
tensor_lib.TensorSpec((5, None, 7), dtypes.float32)) @def_function.function def f7(x): y = x[1:5:2, :, array_ops.newaxis, 50:3,] self.tensorShapeEqual(y.get_shape(), tensor_shape.TensorShape([2, None, 1, 0])) _ = f7.get_concrete_function( tensor_lib.TensorSpec((5, None, 7), dtypes.float32)) @def_function.function def f8(x): y = x[:5:3, :, array_ops.newaxis, 50:3,] self.tensorShapeEqual(y.get_shape(), tensor_shape.TensorShape([2, None, 1, 0])) _ = f8.get_concrete_function( tensor_lib.TensorSpec((5, None, 7), dtypes.float32)) @def_function.function def f9(x): y = x[:2:3, :, array_ops.newaxis, 50:3,] self.tensorShapeEqual(y.get_shape(), tensor_shape.TensorShape([1, None, 1, 0])) _ = f9.get_concrete_function( tensor_lib.TensorSpec((5, None, 7), dtypes.float32)) @def_function.function def f10(x): y = x[::-1, :, array_ops.newaxis, ::-2] self.tensorShapeEqual(y.get_shape(), tensor_shape.TensorShape([5, None, 1, 4])) _ = f10.get_concrete_function( tensor_lib.TensorSpec((5, None, 7), dtypes.float32)) def testTensorValuedIndexShape(self): with self.session(): @def_function.function def f1(x, y): z = x[y] self.tensorShapeEqual(z.get_shape(), tensor_shape.TensorShape([3, 7])) _ = f1.get_concrete_function( tensor_lib.TensorSpec((5, 3, 7)), tensor_lib.TensorSpec((), dtypes.int32)) @def_function.function def f2(x, y): z = x[y, ::-1] self.tensorShapeEqual(z.get_shape(), tensor_shape.TensorShape([3, 7])) _ = f2.get_concrete_function( tensor_lib.TensorSpec((5, 3, 7)), tensor_lib.TensorSpec((), dtypes.int32)) @def_function.function def f3(x, y): z = x[y, ::-2] self.tensorShapeEqual(z.get_shape(), tensor_shape.TensorShape([2, 7])) _ = f3.get_concrete_function( tensor_lib.TensorSpec((5, 3, 7)), tensor_lib.TensorSpec((), dtypes.int32)) @def_function.function def f4(x, y, s): z = x[y, s:2] self.tensorShapeEqual(z.get_shape(), tensor_shape.TensorShape([None, 7])) _ = f4.get_concrete_function( tensor_lib.TensorSpec((5, 3, 7)), tensor_lib.TensorSpec((), dtypes.int32), 
tensor_lib.TensorSpec((), dtypes.int32))
StridedSliceShapeTest
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py
{ "start": 2039, "end": 2204 }
class ____(graphene.Interface): markerStart = graphene.String() markerEnd = graphene.String() class Meta: name = "MarkerEvent"
GrapheneMarkerEvent
python
scipy__scipy
scipy/optimize/tests/test_lsq_common.py
{ "start": 3954, "end": 7050 }
class ____: def setup_method(self): self.J = np.array([ [0.1, 0.2], [-1.0, 1.0], [0.5, 0.2]]) self.g = np.array([0.8, -2.0]) self.diag = np.array([1.0, 2.0]) def test_build_quadratic_1d(self): s = np.zeros(2) a, b = build_quadratic_1d(self.J, self.g, s) assert_equal(a, 0) assert_equal(b, 0) a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag) assert_equal(a, 0) assert_equal(b, 0) s = np.array([1.0, -1.0]) a, b = build_quadratic_1d(self.J, self.g, s) assert_equal(a, 2.05) assert_equal(b, 2.8) a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag) assert_equal(a, 3.55) assert_equal(b, 2.8) s0 = np.array([0.5, 0.5]) a, b, c = build_quadratic_1d(self.J, self.g, s, diag=self.diag, s0=s0) assert_equal(a, 3.55) assert_allclose(b, 2.39) assert_allclose(c, -0.1525) def test_minimize_quadratic_1d(self): a = 5 b = -1 t, y = minimize_quadratic_1d(a, b, 1, 2) assert_equal(t, 1) assert_allclose(y, a * t**2 + b * t, rtol=1e-15) t, y = minimize_quadratic_1d(a, b, -2, -1) assert_equal(t, -1) assert_allclose(y, a * t**2 + b * t, rtol=1e-15) t, y = minimize_quadratic_1d(a, b, -1, 1) assert_equal(t, 0.1) assert_allclose(y, a * t**2 + b * t, rtol=1e-15) c = 10 t, y = minimize_quadratic_1d(a, b, -1, 1, c=c) assert_equal(t, 0.1) assert_allclose(y, a * t**2 + b * t + c, rtol=1e-15) t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf, c=c) assert_equal(t, 0.1) assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) t, y = minimize_quadratic_1d(a, b, 0, np.inf, c=c) assert_equal(t, 0.1) assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) t, y = minimize_quadratic_1d(a, b, -np.inf, 0, c=c) assert_equal(t, 0) assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15) a = -1 b = 0.2 t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf) assert_equal(y, -np.inf) t, y = minimize_quadratic_1d(a, b, 0, np.inf) assert_equal(t, np.inf) assert_equal(y, -np.inf) t, y = minimize_quadratic_1d(a, b, -np.inf, 0) assert_equal(t, -np.inf) assert_equal(y, -np.inf) def test_evaluate_quadratic(self): s 
= np.array([1.0, -1.0]) value = evaluate_quadratic(self.J, self.g, s) assert_equal(value, 4.85) value = evaluate_quadratic(self.J, self.g, s, diag=self.diag) assert_equal(value, 6.35) s = np.array([[1.0, -1.0], [1.0, 1.0], [0.0, 0.0]]) values = evaluate_quadratic(self.J, self.g, s) assert_allclose(values, [4.85, -0.91, 0.0]) values = evaluate_quadratic(self.J, self.g, s, diag=self.diag) assert_allclose(values, [6.35, 0.59, 0.0])
TestQuadraticFunction
python
huggingface__transformers
src/transformers/models/bridgetower/modeling_bridgetower.py
{ "start": 74556, "end": 74950 }
class ____(nn.Module): def __init__(self, hidden_size, embed_size): super().__init__() self.fc = nn.Linear(hidden_size, embed_size) def forward(self, x): x = self.fc(x) return x @auto_docstring( custom_intro=""" BridgeTower Model with a image-text contrastive head on top computing image-text contrastive loss. """ )
BridgeTowerContrastiveHead
python
tensorflow__tensorflow
tensorflow/python/compiler/tensorrt/test/neighboring_engine_test.py
{ "start": 1125, "end": 2259 }
class ____(trt_test.TfTrtIntegrationTestBase): """Neighboring node wiring tests in TF-TRT conversion.""" def GraphFn(self, x): dtype = x.dtype e = constant_op.constant( np.random.normal(.3, 0.05, [3, 2, 3, 4]), name="weights", dtype=dtype) conv = nn.conv2d( input=x, filter=e, data_format="NCHW", strides=[1, 1, 1, 1], padding="VALID", name="conv") b = constant_op.constant( np.random.normal(1.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype) t = math_ops.mul(conv, b, name="mul") e = self.trt_incompatible_op(conv, name="incompatible") t = math_ops.sub(t, e, name="sub") return array_ops.squeeze(t, name="output_0") def GetParams(self): return self.BuildParams(self.GraphFn, dtypes.float32, [[2, 3, 7, 5]], [[2, 4, 5, 4]]) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return { "TRTEngineOp_000": ["bias", "mul", "sub"], "TRTEngineOp_001": ["weights", "conv"] } if __name__ == "__main__": test.main()
NeighboringEngineTest
python
Delgan__loguru
tests/exceptions/source/diagnose/unprintable_object.py
{ "start": 138, "end": 307 }
class ____: def __repr__(self): raise RuntimeError("No way!") try: obj = Object() obj + 1 / 0 except ZeroDivisionError: logger.exception("")
Object
python
run-llama__llama_index
llama-index-core/llama_index/core/callbacks/token_counting.py
{ "start": 587, "end": 4166 }
class ____: prompt: str completion: str completion_token_count: int prompt_token_count: int total_token_count: int = 0 event_id: str = "" def __post_init__(self) -> None: self.total_token_count = self.prompt_token_count + self.completion_token_count def get_tokens_from_response( response: Union["CompletionResponse", "ChatResponse"], ) -> Tuple[int, int]: """Get the token counts from a raw response.""" raw_response = response.raw if not isinstance(raw_response, dict): raw_response = dict(raw_response or {}) usage = raw_response.get("usage", raw_response.get("usage_metadata", {})) if usage is None: usage = response.additional_kwargs if not usage: return 0, 0 if not isinstance(usage, dict): usage = usage.model_dump() possible_input_keys = ("prompt_tokens", "input_tokens", "prompt_token_count") possible_output_keys = ( "completion_tokens", "output_tokens", "candidates_token_count", ) prompt_tokens = 0 for input_key in possible_input_keys: if input_key in usage: prompt_tokens = usage[input_key] break completion_tokens = 0 for output_key in possible_output_keys: if output_key in usage: completion_tokens = usage[output_key] break return prompt_tokens, completion_tokens def get_llm_token_counts( token_counter: TokenCounter, payload: Dict[str, Any], event_id: str = "" ) -> TokenCountingEvent: from llama_index.core.llms import ChatMessage if EventPayload.PROMPT in payload: prompt = payload.get(EventPayload.PROMPT) completion = payload.get(EventPayload.COMPLETION) if completion: # get from raw or additional_kwargs prompt_tokens, completion_tokens = get_tokens_from_response(completion) else: prompt_tokens, completion_tokens = 0, 0 if prompt_tokens == 0: prompt_tokens = token_counter.get_string_tokens(str(prompt)) if completion_tokens == 0: completion_tokens = token_counter.get_string_tokens(str(completion)) return TokenCountingEvent( event_id=event_id, prompt=str(prompt), prompt_token_count=prompt_tokens, completion=str(completion), completion_token_count=completion_tokens, ) 
elif EventPayload.MESSAGES in payload: messages = cast(List[ChatMessage], payload.get(EventPayload.MESSAGES, [])) messages_str = "\n".join([str(x) for x in messages]) response = payload.get(EventPayload.RESPONSE) response_str = str(response) if response: prompt_tokens, completion_tokens = get_tokens_from_response(response) else: prompt_tokens, completion_tokens = 0, 0 if prompt_tokens == 0: prompt_tokens = token_counter.estimate_tokens_in_messages(messages) if completion_tokens == 0: completion_tokens = token_counter.get_string_tokens(response_str) return TokenCountingEvent( event_id=event_id, prompt=messages_str, prompt_token_count=prompt_tokens, completion=response_str, completion_token_count=completion_tokens, ) else: return TokenCountingEvent( event_id=event_id, prompt="", prompt_token_count=0, completion="", completion_token_count=0, )
TokenCountingEvent
python
dask__distributed
distributed/protocol/tests/test_pickle.py
{ "start": 7083, "end": 9192 }
class ____: def __init__(self) -> None: self.stuff = {"foo": NoPickle()} def test_nopickle_nested(): nested_obj = [NoPickle()] with pytest.raises(TypeError, match="nope"): dumps(nested_obj) with pytest.raises(TypeError, match="nope"): dumps(NestedNoPickle()) dask_serialize.register(NoPickle)(_serialize_nopickle) dask_deserialize.register(NoPickle)(_deserialize_nopickle) try: obj = NestedNoPickle() roundtrip = loads(dumps(obj)) assert roundtrip is not obj assert isinstance(roundtrip.stuff["foo"], NoPickle) roundtrip = loads(dumps(nested_obj)) assert roundtrip is not nested_obj assert isinstance(roundtrip[0], NoPickle) finally: del dask_serialize._lookup[NoPickle] del dask_deserialize._lookup[NoPickle] @pytest.mark.slow() def test_pickle_functions_in_main(tmp_path): script = """ from dask.distributed import Client if __name__ == "__main__": with Client(n_workers=1) as client: def func(df): return (df + 5) client.submit(func, 5).result() print("script successful", flush=True) """ with open(tmp_path / "script.py", mode="w") as f: f.write(script) with popen([sys.executable, tmp_path / "script.py"], capture_output=True) as proc: out, _ = proc.communicate(timeout=60) assert "script successful" in out.decode("utf-8") @pytest.mark.parametrize("serializer", ["dask", "pickle"]) def test_pickle_zero_copy_read_only_flag(serializer): np = pytest.importorskip("numpy") a = np.arange(10) a.flags.writeable = False header, frames = serialize(a, serializers=[serializer]) frames = [bytearray(f) for f in frames] # Simulate network transfer b = deserialize(header, frames) c = deserialize(header, frames) assert not b.flags.writeable assert not c.flags.writeable ptr_a = a.__array_interface__["data"][0] ptr_b = b.__array_interface__["data"][0] ptr_c = c.__array_interface__["data"][0] assert ptr_b != ptr_a assert ptr_b == ptr_c
NestedNoPickle
python
huggingface__transformers
src/transformers/generation/streamers.py
{ "start": 1164, "end": 6280 }
class ____(BaseStreamer): """ Simple text streamer that prints the token(s) to stdout as soon as entire words are formed. <Tip warning={true}> The API for the streamer classes is still under development and may change in the future. </Tip> Parameters: tokenizer (`AutoTokenizer`): The tokenized used to decode the tokens. skip_prompt (`bool`, *optional*, defaults to `False`): Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots. decode_kwargs (`dict`, *optional*): Additional keyword arguments to pass to the tokenizer's `decode` method. Examples: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2") >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt") >>> streamer = TextStreamer(tok) >>> # Despite returning the usual output, the streamer will also print the generated text to stdout. >>> _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20) An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven, ``` """ def __init__(self, tokenizer: AutoTokenizer, skip_prompt: bool = False, **decode_kwargs): self.tokenizer = tokenizer self.skip_prompt = skip_prompt self.decode_kwargs = decode_kwargs # variables used in the streaming process self.token_cache = [] self.print_len = 0 self.next_tokens_are_prompt = True def put(self, value): """ Receives tokens, decodes them, and prints them to stdout as soon as they form entire words. """ if len(value.shape) > 1 and value.shape[0] > 1: raise ValueError("TextStreamer only supports batch size 1") elif len(value.shape) > 1: value = value[0] if self.skip_prompt and self.next_tokens_are_prompt: self.next_tokens_are_prompt = False return # Add the new token to the cache and decodes the entire thing. 
self.token_cache.extend(value.tolist()) text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs) # After the symbol for a new line, we flush the cache. if text.endswith("\n"): printable_text = text[self.print_len :] self.token_cache = [] self.print_len = 0 # If the last token is a CJK character, we print the characters. elif len(text) > 0 and self._is_chinese_char(ord(text[-1])): printable_text = text[self.print_len :] self.print_len += len(printable_text) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: printable_text = text[self.print_len : text.rfind(" ") + 1] self.print_len += len(printable_text) self.on_finalized_text(printable_text) def end(self): """Flushes any remaining cache and prints a newline to stdout.""" # Flush the cache, if it exists if len(self.token_cache) > 0: text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs) printable_text = text[self.print_len :] self.token_cache = [] self.print_len = 0 else: printable_text = "" self.next_tokens_are_prompt = True self.on_finalized_text(printable_text, stream_end=True) def on_finalized_text(self, text: str, stream_end: bool = False): """Prints the new text to stdout. If the stream is ending, also prints a newline.""" print(text, flush=True, end="" if not stream_end else None) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. 
Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) or (cp >= 0x20000 and cp <= 0x2A6DF) or (cp >= 0x2A700 and cp <= 0x2B73F) or (cp >= 0x2B740 and cp <= 0x2B81F) or (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) ): return True return False
TextStreamer
python
sympy__sympy
sympy/assumptions/predicates/order.py
{ "start": 7780, "end": 8301 }
class ____(Predicate): """ Nonzero extended real number predicate. Explanation =========== ``ask(Q.extended_nonzero(x))`` is true iff ``x`` is extended real and ``x`` is not zero. Examples ======== >>> from sympy import ask, I, oo, Q >>> ask(Q.extended_nonzero(-1)) True >>> ask(Q.extended_nonzero(oo)) True >>> ask(Q.extended_nonzero(I)) False """ name = 'extended_nonzero' handler = Dispatcher("ExtendedNonZeroHandler")
ExtendedNonZeroPredicate
python
getsentry__sentry
src/sentry/taskworker/workerchild.py
{ "start": 1016, "end": 18452 }
class ____(BaseException): pass def child_worker_init(process_type: str) -> None: """ Configure django and load task modules for workers Child worker processes are spawned and don't inherit db connections or configuration from the parent process. """ from sentry.runner import configure if process_type == "spawn": configure() @contextlib.contextmanager def timeout_alarm( seconds: int, handler: Callable[[int, FrameType | None], None] ) -> Generator[None]: """ Context manager to handle SIGALRM handlers To prevent tasks from consuming a worker forever, we set a timeout alarm that will interrupt tasks that run longer than their processing_deadline. """ original = signal.signal(signal.SIGALRM, handler) try: signal.alarm(seconds) yield finally: signal.alarm(0) signal.signal(signal.SIGALRM, original) def load_parameters(data: str, headers: dict[str, str]) -> dict[str, Any]: compression_type = headers.get("compression-type", None) if not compression_type or compression_type == CompressionType.PLAINTEXT.value: return orjson.loads(data) elif compression_type == CompressionType.ZSTD.value: return orjson.loads(zstd.decompress(base64.b64decode(data))) else: logger.error( "Unsupported compression type: %s. Continuing with plaintext.", compression_type ) return orjson.loads(data) def status_name(status: TaskActivationStatus.ValueType) -> str: """Convert a TaskActivationStatus to a human readable name""" if status == TASK_ACTIVATION_STATUS_COMPLETE: return "complete" if status == TASK_ACTIVATION_STATUS_FAILURE: return "failure" if status == TASK_ACTIVATION_STATUS_RETRY: return "retry" return f"unknown-{status}" def child_process( app_module: str, child_tasks: queue.Queue[InflightTaskActivation], processed_tasks: queue.Queue[ProcessingResult], shutdown_event: Event, max_task_count: int | None, processing_pool_name: str, process_type: str, ) -> None: """ The entrypoint for spawned worker children. 
Any import that could pull in django needs to be put inside this functiona and not the module root. If modules that include django are imported at the module level the wrong django settings will be used. """ child_worker_init(process_type) from sentry.taskworker.app import import_app from sentry.taskworker.retry import NoRetriesRemainingError from sentry.taskworker.state import clear_current_task, current_task, set_current_task from sentry.taskworker.task import Task from sentry.utils import metrics from sentry.utils.memory import track_memory_usage app = import_app(app_module) app.load_modules() taskregistry = app.taskregistry def _get_known_task(activation: TaskActivation) -> Task[Any, Any] | None: if not taskregistry.contains(activation.namespace): logger.error( "taskworker.invalid_namespace", extra={"namespace": activation.namespace, "taskname": activation.taskname}, ) return None namespace = taskregistry.get(activation.namespace) if not namespace.contains(activation.taskname): logger.error( "taskworker.invalid_taskname", extra={"namespace": activation.namespace, "taskname": activation.taskname}, ) return None return namespace.get(activation.taskname) def run_worker( child_tasks: queue.Queue[InflightTaskActivation], processed_tasks: queue.Queue[ProcessingResult], shutdown_event: Event, max_task_count: int | None, processing_pool_name: str, process_type: str, ) -> None: processed_task_count = 0 def handle_alarm(signum: int, frame: FrameType | None) -> None: """ Handle SIGALRM If we hit an alarm in a child, we need to push a result and terminate the child. 
""" deadline = -1 current = current_task() taskname = "unknown" if current: taskname = current.taskname deadline = current.processing_deadline_duration raise ProcessingDeadlineExceeded( f"execution deadline of {deadline} seconds exceeded by {taskname}" ) while not shutdown_event.is_set(): if max_task_count and processed_task_count >= max_task_count: metrics.incr( "taskworker.worker.max_task_count_reached", tags={"count": processed_task_count, "processing_pool": processing_pool_name}, ) logger.info( "taskworker.max_task_count_reached", extra={"count": processed_task_count} ) break try: inflight = child_tasks.get(timeout=1.0) except queue.Empty: metrics.incr( "taskworker.worker.child_task_queue_empty", tags={"processing_pool": processing_pool_name}, ) continue task_func = _get_known_task(inflight.activation) if not task_func: metrics.incr( "taskworker.worker.unknown_task", tags={ "namespace": inflight.activation.namespace, "taskname": inflight.activation.taskname, "processing_pool": processing_pool_name, }, sample_rate=1.0, ) with sentry_sdk.isolation_scope() as scope: scope.set_tag("taskname", inflight.activation.taskname) scope.set_tag("namespace", inflight.activation.namespace) scope.set_tag("processing_pool", processing_pool_name) scope.set_extra("activation", str(inflight.activation)) scope.capture_message( f"Unregistered task {inflight.activation.taskname} was not executed" ) processed_tasks.put( ProcessingResult( task_id=inflight.activation.id, status=TASK_ACTIVATION_STATUS_FAILURE, host=inflight.host, receive_timestamp=inflight.receive_timestamp, ) ) continue if task_func.at_most_once: if app.should_attempt_at_most_once(inflight.activation): metrics.incr( "taskworker.task.at_most_once.executed", tags={ "namespace": inflight.activation.namespace, "taskname": inflight.activation.taskname, "processing_pool": processing_pool_name, }, ) else: metrics.incr( "taskworker.worker.at_most_once.skipped", tags={ "namespace": inflight.activation.namespace, "taskname": 
inflight.activation.taskname, "processing_pool": processing_pool_name, }, ) continue set_current_task(inflight.activation) next_state = TASK_ACTIVATION_STATUS_FAILURE # Use time.time() so we can measure against activation.received_at execution_start_time = time.time() try: with timeout_alarm(inflight.activation.processing_deadline_duration, handle_alarm): _execute_activation(task_func, inflight.activation) next_state = TASK_ACTIVATION_STATUS_COMPLETE except ProcessingDeadlineExceeded as err: with sentry_sdk.isolation_scope() as scope: scope.fingerprint = [ "taskworker.processing_deadline_exceeded", inflight.activation.namespace, inflight.activation.taskname, ] scope.set_transaction_name(inflight.activation.taskname) sentry_sdk.capture_exception(err) metrics.incr( "taskworker.worker.processing_deadline_exceeded", tags={ "processing_pool": processing_pool_name, "namespace": inflight.activation.namespace, "taskname": inflight.activation.taskname, }, ) next_state = TASK_ACTIVATION_STATUS_FAILURE except Exception as err: retry = task_func.retry captured_error = False if retry: if retry.should_retry(inflight.activation.retry_state, err): logger.info( "taskworker.task.retry", extra={ "namespace": inflight.activation.namespace, "taskname": inflight.activation.taskname, "processing_pool": processing_pool_name, "error": str(err), }, ) next_state = TASK_ACTIVATION_STATUS_RETRY elif retry.max_attempts_reached(inflight.activation.retry_state): with sentry_sdk.isolation_scope() as scope: retry_error = NoRetriesRemainingError( f"{inflight.activation.taskname} has consumed all of its retries" ) retry_error.__cause__ = err scope.fingerprint = [ "taskworker.no_retries_remaining", inflight.activation.namespace, inflight.activation.taskname, ] scope.set_transaction_name(inflight.activation.taskname) sentry_sdk.capture_exception(retry_error) captured_error = True if not captured_error and next_state != TASK_ACTIVATION_STATUS_RETRY: sentry_sdk.capture_exception(err) clear_current_task() 
processed_task_count += 1 # Get completion time before pushing to queue, so we can measure queue append time execution_complete_time = time.time() with metrics.timer( "taskworker.worker.processed_tasks.put.duration", tags={ "processing_pool": processing_pool_name, }, ): processed_tasks.put( ProcessingResult( task_id=inflight.activation.id, status=next_state, host=inflight.host, receive_timestamp=inflight.receive_timestamp, ) ) record_task_execution( inflight.activation, next_state, execution_start_time, execution_complete_time, processing_pool_name, inflight.host, ) def _execute_activation(task_func: Task[Any, Any], activation: TaskActivation) -> None: """Invoke a task function with the activation parameters.""" headers = {k: v for k, v in activation.headers.items()} parameters = load_parameters(activation.parameters, headers) args = parameters.get("args", []) kwargs = parameters.get("kwargs", {}) transaction = sentry_sdk.continue_trace( environ_or_headers=headers, op="queue.task.taskworker", name=activation.taskname, origin="taskworker", ) sampling_context = { "taskworker": { "task": activation.taskname, } } with ( track_memory_usage( "taskworker.worker.memory_change", tags={"namespace": activation.namespace, "taskname": activation.taskname}, ), sentry_sdk.isolation_scope(), sentry_sdk.start_transaction(transaction, custom_sampling_context=sampling_context), ): transaction.set_data( "taskworker-task", {"args": args, "kwargs": kwargs, "id": activation.id} ) task_added_time = activation.received_at.ToDatetime().timestamp() # latency attribute needs to be in milliseconds latency = (time.time() - task_added_time) * 1000 with sentry_sdk.start_span( op=OP.QUEUE_PROCESS, name=activation.taskname, origin="taskworker", ) as span: span.set_data(SPANDATA.MESSAGING_DESTINATION_NAME, activation.namespace) span.set_data(SPANDATA.MESSAGING_MESSAGE_ID, activation.id) span.set_data(SPANDATA.MESSAGING_MESSAGE_RECEIVE_LATENCY, latency) span.set_data( 
SPANDATA.MESSAGING_MESSAGE_RETRY_COUNT, activation.retry_state.attempts ) span.set_data(SPANDATA.MESSAGING_SYSTEM, "taskworker") # TODO(taskworker) remove this when doing cleanup # The `__start_time` parameter is spliced into task parameters by # sentry.celery.SentryTask._add_metadata and needs to be removed # from kwargs like sentry.tasks.base.instrumented_task does. if "__start_time" in kwargs: kwargs.pop("__start_time") try: task_func(*args, **kwargs) transaction.set_status(SPANSTATUS.OK) except Exception: transaction.set_status(SPANSTATUS.INTERNAL_ERROR) raise def record_task_execution( activation: TaskActivation, status: TaskActivationStatus.ValueType, start_time: float, completion_time: float, processing_pool_name: str, taskbroker_host: str, ) -> None: task_added_time = activation.received_at.ToDatetime().timestamp() execution_duration = completion_time - start_time execution_latency = completion_time - task_added_time logger.debug( "taskworker.task_execution", extra={ "taskname": activation.taskname, "execution_duration": execution_duration, "execution_latency": execution_latency, "status": status_name(status), }, ) metrics.incr( "taskworker.worker.execute_task", tags={ "namespace": activation.namespace, "taskname": activation.taskname, "status": status_name(status), "processing_pool": processing_pool_name, "taskbroker_host": taskbroker_host, }, ) metrics.distribution( "taskworker.worker.execution_duration", execution_duration, tags={ "namespace": activation.namespace, "taskname": activation.taskname, "processing_pool": processing_pool_name, "taskbroker_host": taskbroker_host, }, ) metrics.distribution( "taskworker.worker.execution_latency", execution_latency, tags={ "namespace": activation.namespace, "taskname": activation.taskname, "processing_pool": processing_pool_name, "taskbroker_host": taskbroker_host, }, ) namespace = taskregistry.get(activation.namespace) metrics.incr( "taskworker.cogs.usage", amount=int(execution_duration * 1000), tags={"feature": 
namespace.app_feature}, ) if ( "sentry-monitor-check-in-id" in activation.headers and "sentry-monitor-slug" in activation.headers ): monitor_status = MonitorStatus.ERROR if status == TASK_ACTIVATION_STATUS_COMPLETE: monitor_status = MonitorStatus.OK capture_checkin( monitor_slug=activation.headers["sentry-monitor-slug"], check_in_id=activation.headers["sentry-monitor-check-in-id"], duration=execution_duration, status=monitor_status, ) # Run the worker loop run_worker( child_tasks, processed_tasks, shutdown_event, max_task_count, processing_pool_name, process_type, )
ProcessingDeadlineExceeded
python
spack__spack
lib/spack/spack/error.py
{ "start": 4494, "end": 4604 }
class ____(NoSuchPatchError): """Raised when a patch file cannot be located from sha256."""
PatchLookupError
python
pypa__hatch
tests/index/test_core.py
{ "start": 139, "end": 312 }
class ____: def test_normalization(self): index = PackageIndex("Https://Foo.Internal/z/../a/b/") assert index.repo == "https://foo.internal/a/b/"
TestRepo
python
huggingface__transformers
src/transformers/models/speecht5/tokenization_speecht5.py
{ "start": 1053, "end": 6722 }
class ____(SentencePieceBackend): """ Construct a SpeechT5 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. bos_token (`str`, *optional*, defaults to `"<s>"`): The begin of sequence token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. normalize (`bool`, *optional*, defaults to `False`): Whether to convert numeric quantities in the text to their spelt-out english counterparts. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. 
Attributes: sp_model (`SentencePieceProcessor`): The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] is_fast = False def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", normalize=False, sp_model_kwargs: Optional[dict[str, Any]] = None, **kwargs, ) -> None: self.normalize = normalize self._normalizer = None # Prepare sp_model_kwargs for parent class if sp_model_kwargs is not None: kwargs["sp_model_kwargs"] = sp_model_kwargs # Call parent init (which will load sp_model) super().__init__( vocab_file=vocab_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, normalize=normalize, **kwargs, ) def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): normalize = kwargs.pop("normalize", self.normalize) if is_split_into_words: text = " " + text if normalize: text = self.normalizer(text) return (text, kwargs) @property def normalizer(self): if self._normalizer is None: self._normalizer = EnglishNumberNormalizer() return self._normalizer @normalizer.setter def normalizer(self, value): self._normalizer = value def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list[int]: """Build model inputs from a sequence by appending eos_token_id.""" if token_ids_1 is None: return token_ids_0 + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_0 + token_ids_1 + [self.eos_token_id] def get_special_tokens_mask( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False ) -> list[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) suffix_ones = [1] if token_ids_1 is None: return ([0] * len(token_ids_0)) 
+ suffix_ones return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones def create_token_type_ids_from_sequences( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. SpeechT5 does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. """ eos = [self.eos_token_id] if token_ids_1 is None: return len(token_ids_0 + eos) * [0] return len(token_ids_0 + token_ids_1 + eos) * [0] __all__ = ["SpeechT5Tokenizer"]
SpeechT5Tokenizer
python
pytorch__pytorch
test/dynamo/cpython/3_13/test_raise.py
{ "start": 7735, "end": 8339 }
class ____(__TestCase): def test_sets_traceback(self): try: raise IndexError() except IndexError as e: self.assertIsInstance(e.__traceback__, types.TracebackType) else: self.fail("No exception raised") def test_accepts_traceback(self): tb = get_tb() try: raise IndexError().with_traceback(tb) except IndexError as e: self.assertNotEqual(e.__traceback__, tb) self.assertEqual(e.__traceback__.tb_next, tb) else: self.fail("No exception raised")
TestTraceback
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/hooks/cloud_logging.py
{ "start": 1388, "end": 4360 }
class ____(GoogleBaseHook): """ Hook for Google Cloud Logging Log Sinks API. :param gcp_conn_id: The connection ID to use when fetching connection info. :param impersonation_chain: Optional service account to impersonate. """ def __init__( self, gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__(gcp_conn_id=gcp_conn_id, impersonation_chain=impersonation_chain, **kwargs) self._client: ConfigServiceV2Client | None = None def get_conn(self) -> ConfigServiceV2Client: """Return the Google Cloud Logging Config client.""" if not self._client: self._client = ConfigServiceV2Client(credentials=self.get_credentials(), client_info=CLIENT_INFO) return self._client def get_parent(self, project_id): return f"projects/{project_id}" @GoogleBaseHook.fallback_to_default_project_id def create_sink( self, sink: LogSink | dict, unique_writer_identity: bool = True, project_id: str = PROVIDE_PROJECT_ID ) -> LogSink: if isinstance(sink, dict): sink = LogSink(**sink) request = CreateSinkRequest( parent=self.get_parent(project_id), sink=sink, unique_writer_identity=unique_writer_identity ) return self.get_conn().create_sink(request=request) @GoogleBaseHook.fallback_to_default_project_id def get_sink(self, sink_name: str, project_id: str = PROVIDE_PROJECT_ID) -> LogSink: request = GetSinkRequest(sink_name=f"projects/{project_id}/sinks/{sink_name}") return self.get_conn().get_sink(request=request) @GoogleBaseHook.fallback_to_default_project_id def list_sinks(self, page_size: int | None = None, project_id: str = PROVIDE_PROJECT_ID) -> list[LogSink]: request = ListSinksRequest(parent=self.get_parent(project_id), page_size=page_size) return list(self.get_conn().list_sinks(request=request)) @GoogleBaseHook.fallback_to_default_project_id def delete_sink(self, sink_name: str, project_id: str = PROVIDE_PROJECT_ID) -> None: request = DeleteSinkRequest(sink_name=f"projects/{project_id}/sinks/{sink_name}") 
self.get_conn().delete_sink(request=request) @GoogleBaseHook.fallback_to_default_project_id def update_sink( self, sink_name: str, sink: LogSink | dict, unique_writer_identity: bool, update_mask: FieldMask | dict, project_id: str = PROVIDE_PROJECT_ID, ) -> LogSink: if isinstance(sink, dict): sink = LogSink(**sink) request = UpdateSinkRequest( sink_name=f"projects/{project_id}/sinks/{sink_name}", sink=sink, unique_writer_identity=unique_writer_identity, update_mask=update_mask, ) return self.get_conn().update_sink(request=request)
CloudLoggingHook
python
fluentpython__example-code-2e
23-descriptor/bulkfood/bulkfood_v4.py
{ "start": 991, "end": 1366 }
class ____: def __set_name__(self, owner, name): # <1> self.storage_name = name # <2> def __set__(self, instance, value): # <3> if value > 0: instance.__dict__[self.storage_name] = value else: msg = f'{self.storage_name} must be > 0' raise ValueError(msg) # no __get__ needed # <4>
Quantity
python
cython__cython
Cython/Compiler/ExprNodes.py
{ "start": 630393, "end": 631428 }
class ____(ExprNode): # This class exists to pass the first argument of a function # to a critical_section. Mostly just to defer analysis since # func.args isn't available for cdef functions until the # analyse_declarations stage # # func_node - FuncDefNode subexprs = ['name_node'] name_node = None type = PyrexTypes.py_object_type def analyse_declarations(self, env): if len(self.func_node.args) < 1: error(self.pos, "critical_section directive can only be applied to a function with one or more positional arguments") return self.name_node = NameNode(self.pos, name=self.func_node.args[0].declared_name()) self.name_node.analyse_declarations(env) self.type = self.name_node.type def analyse_expressions(self, env): # At this stage, just substitute the name node if self.name_node: return self.name_node.analyse_expressions(env) return self # error earlier
FirstArgumentForCriticalSectionNode
python
HypothesisWorks__hypothesis
hypothesis-python/src/hypothesis/utils/conventions.py
{ "start": 417, "end": 701 }
class ____: """A factory for sentinel objects with nice reprs.""" def __init__(self, identifier: str) -> None: self.identifier = identifier def __repr__(self) -> str: return self.identifier infer = ... not_set = UniqueIdentifier("not_set")
UniqueIdentifier
python
pymupdf__PyMuPDF
src/table.py
{ "start": 49539, "end": 49789 }
class ____: """PyMuPDF extension containing the identified table header.""" def __init__(self, bbox, cells, names, above): self.bbox = bbox self.cells = cells self.names = names self.external = above
TableHeader
python
Textualize__textual
tests/test_binding_inheritance.py
{ "start": 11382, "end": 12536 }
class ____(AppKeyRecorder): """An app with a non-default screen that handles movement key bindings.""" SCREENS = {"main": ScreenWithMovementBindings} def on_mount(self) -> None: self.push_screen("main") async def test_focused_child_widget_with_movement_bindings_on_screen() -> None: """A focused child widget, with movement bindings in the screen, should trigger screen actions.""" async with AppWithScreenWithBindingsWidgetNoBindings().run_test() as pilot: await pilot.press(*AppKeyRecorder.ALL_KEYS) pilot.app.all_recorded("screenly_") ############################################################################## # A focused widget within a container within a screen that handles bindings. # # Similar again to the previous test, here we're wrapping an app around a # non-default screen, which in turn wraps a container which wraps a widget # that can have, and will have, focus. The issue here is that if the # container isn't scrolling, especially if it's set up to just wrap a widget # and do nothing else, it should not rob the screen of the binding hits.
AppWithScreenWithBindingsWidgetNoBindings
python
Pylons__pyramid
tests/test_scripting.py
{ "start": 7664, "end": 7866 }
class ____: @classmethod def blank(cls, path): req = DummyRequest({'path': path}) return req def __init__(self, *a, **kw): self.a = a self.kw = kw
DummyFactory
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_django/DJ008.py
{ "start": 3159, "end": 3286 }
class ____(TestModel1): def __str__(self): return self.new_field # Subclass with inherited __str__
SubclassTestModel1
python
doocs__leetcode
solution/2200-2299/2248.Intersection of Multiple Arrays/Solution2.py
{ "start": 0, "end": 312 }
class ____: def intersection(self, nums: List[List[int]]) -> List[int]: cnt = Counter() ans = [] for arr in nums: for x in arr: cnt[x] += 1 if cnt[x] == len(nums): ans.append(x) ans.sort() return ans
Solution
python
neetcode-gh__leetcode
python/1750-minimum-length-of-string-after-deleting-similar-ends.py
{ "start": 0, "end": 308 }
class ____: def minimumLength(self, s: str) -> int: l, r = 0, len(s) - 1 while l < r and s[l] == s[r]: tmp = s[l] while l <= r and s[l] == tmp: l += 1 while l <= r and s[r] == tmp: r -= 1 return (r - l + 1)
Solution
python
huggingface__transformers
tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py
{ "start": 16698, "end": 32477 }
class ____(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct") self.messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "What kind of dog is this?"}, ], } ] img_url = url_to_local_path("https://qianwen-res.oss-accelerate-overseas.aliyuncs.com/Qwen2-VL/demo_small.jpg") self.image = load_image(img_url).convert("RGB") cleanup(torch_device, gc_collect=True) def tearDown(self): cleanup(torch_device, gc_collect=True) @slow def test_small_model_integration_test(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text], images=[self.image], return_tensors="pt") expected_input_ids = [151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13, 151645, 198, 151644, 872, 198, 151652, 151655, 151655] # fmt: skip torch.testing.assert_close(expected_input_ids, inputs.input_ids[0].tolist()[:17]) expected_pixel_slice = torch.tensor( [ [0.8792, 0.8792, 0.9084], [1.1858, 1.1858, 1.2296], [1.2004, 1.2004, 1.2150], [1.4340, 1.4340, 1.4194], [1.3902, 1.4048, 1.4194], [1.5216, 1.5362, 1.5362], ], dtype=torch.float32, device="cpu", ) torch.testing.assert_close(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=5e-4, rtol=1e-5) # verify generation inputs = inputs.to(torch_device) output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. 
Labradors are known for their friendly and energetic nature, which is evident in" self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text], images=[self.image, self.image], return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and energetic nature, which is evident in', 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and energetic nature, which is evident in', ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_expand(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text], images=[self.image], return_tensors="pt").to(torch_device) output = model.generate(**inputs, max_new_tokens=30, num_return_sequences=3) EXPECTED_DECODED_TEXT = [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. 
Labradors are known for their friendly and energetic nature, which is evident in', 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and energetic nature, which is evident in', 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and energetic nature, which is evident in', ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch_wo_image(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) messages2 = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Who are you?"}, ] text2 = self.processor.apply_chat_template(messages2, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text2], images=[self.image], padding=True, return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. 
Labradors are known for their friendly and energetic nature, which is evident in', 'system\nYou are a helpful assistant.\nuser\nWho are you?\nassistant\n addCriterion', ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch_different_resolutions(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) text2 = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) image2 = self.image.resize((224, 224)) inputs = self.processor( text=[text, text2], images=[self.image, image2], padding=True, return_tensors="pt", ).to(torch_device) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) expected_decoded_texts = Expectations( { (None, None): [ "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and energetic nature, which is evident in", "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\n addCriterion\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and gentle nature, which is", ], ("cuda", (8, 6)): [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and energetic nature, which is evident in', 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. 
Labradors are known for their friendly and energetic nature, which is evident in', ], ("rocm", None): [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and energetic nature, which is evident in', 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\n addCriterion\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and gentle nature, which is', ], } ).get_expectation() # fmt: skip decoded_texts = self.processor.batch_decode(output, skip_special_tokens=True) for i, (expected, decoded) in enumerate(zip(expected_decoded_texts, decoded_texts)): self.assertEqual( decoded, expected, f"Decoded text {i}:\n{repr(decoded)}\ndoes not match expected decoded text:\n{repr(expected)}", ) @slow @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test def test_small_model_integration_test_batch_flashatt2(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map="auto", ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text], images=[self.image, self.image], return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) expected_decoded_text = Expectations({ ("cuda", None): "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and energetic nature, which is evident in", ("rocm", (9, 4)): "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. 
Labradors are known for their friendly and energetic nature, which is evident in" }).get_expectation() # fmt: skip # Since the test is to generate twice the same text, we just test twice against the expected decoded text decoded_texts = self.processor.batch_decode(output, skip_special_tokens=True) self.assertEqual(decoded_texts[0], expected_decoded_text) self.assertEqual(decoded_texts[1], expected_decoded_text) @slow @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test def test_small_model_integration_test_batch_wo_image_flashatt2(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map="auto", ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) messages2 = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Who are you?"}, ] text2 = self.processor.apply_chat_template(messages2, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text2], images=[self.image], padding=True, return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) # FIXME: The second decoded text in the CUDA expectation seems to be incorrect, it used to be the second text # on the ROCm expectation that was the correct one. Either model changed or code is buggy. EXPECTED_DECODED_TEXT = Expectations({ ("cuda", None): [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and energetic nature, which is evident in', "system\nYou are a helpful assistant.\nuser\nWho are you?\nassistant\n�\n\n addCriterion\nI'm sorry, but I don't understand your question. 
Could you please provide more context or clarify what you're asking", ], ("rocm", (9, 4)): [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and energetic nature, which is evident in', "system\nYou are a helpful assistant.\nuser\nWho are you?\nassistant\nI am Qwen, a large language model created by Alibaba Cloud. I am designed to answer a wide range of questions and provide information on various topics", ], }).get_expectation() # fmt: skip decoded_text = self.processor.batch_decode(output, skip_special_tokens=True) self.assertEqual(decoded_text, EXPECTED_DECODED_TEXT) @slow @require_cv2 def test_small_model_integration_test_with_video(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", dtype="auto", device_map="auto" ) video_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_videos/resolve/main/tennis.mp4" messages2 = [ { "role": "user", "content": [ { "type": "video", }, {"type": "text", "text": "What is shown in this video?"}, ], } ] text = self.processor.apply_chat_template(messages2, tokenize=False, add_generation_prompt=True) with tempfile.NamedTemporaryFile(suffix=".mp4") as f: f.write(requests.get(video_url).content) f.flush() cap = cv2.VideoCapture(f.name) frames = [] while True: ret, frame = cap.read() if not ret: break frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frames.append(Image.fromarray(frame_rgb).resize((224, 224), Image.BICUBIC)) cap.release() inputs = self.processor(text=[text], videos=[frames], return_tensors="pt").to(torch_device) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ 'system\nYou are a helpful assistant.\nuser\nWhat is shown in this video?\nassistant\nThe video shows an indoor tennis court with a person standing on one side, preparing to 
serve the ball. The individual is dressed in athletic attire, including', ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, )
Qwen2_5_VLIntegrationTest
python
ansible__ansible
test/lib/ansible_test/_util/target/setup/requirements.py
{ "start": 9514, "end": 9600 }
class ____(Exception): """Base class for application exceptions."""
ApplicationError
python
facelessuser__pymdown-extensions
tests/test_extensions/test_blocks/test_general_blocks.py
{ "start": 5047, "end": 5342 }
class ____(unittest.TestCase): """Test registration cases.""" def test_duplicates(self): """Test duplicates.""" with self.assertRaises(ValueError): markdown.markdown('test', extensions=['pymdownx.blocks.admonition', 'pymdownx.blocks.admonition'])
TestRegister
python
huggingface__transformers
src/transformers/models/speecht5/modeling_speecht5.py
{ "start": 79103, "end": 86231 }
class ____(SpeechT5PreTrainedModel): def __init__( self, config: SpeechT5Config, encoder: Optional[nn.Module] = None, decoder: Optional[nn.Module] = None, ): r""" encoder (`PreTrainedModel`, *optional*): The encoder model to use. decoder (`PreTrainedModel`, *optional*): The decoder model to use. """ super().__init__(config) self.config = config self.encoder = SpeechT5EncoderWithoutPrenet(config) if encoder is None else encoder self.decoder = SpeechT5DecoderWithoutPrenet(config) if decoder is None else decoder # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): if isinstance(self.encoder, SpeechT5EncoderWithTextPrenet): return self.encoder.get_input_embeddings() if isinstance(self.decoder, SpeechT5DecoderWithTextPrenet): return self.decoder.get_input_embeddings() raise NotImplementedError def set_input_embeddings(self, value): if isinstance(self.encoder, SpeechT5EncoderWithTextPrenet): self.encoder.set_input_embeddings(value) if isinstance(self.decoder, SpeechT5DecoderWithTextPrenet): self.decoder.set_input_embeddings(value) def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. 
""" if isinstance(self.encoder, SpeechT5EncoderWithSpeechPrenet): self.encoder.prenet.freeze_feature_encoder() @auto_docstring def forward( self, input_values: Optional[torch.Tensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_values: Optional[torch.Tensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = None, speaker_embeddings: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]: r""" input_values (`torch.Tensor` of shape `(batch_size, sequence_length)`): Depending on which encoder is being used, the `input_values` are either: float values of the input raw speech waveform, or indices of input sequence tokens in the vocabulary, or hidden states. decoder_input_values (`torch.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Depending on which decoder is being used, the `decoder_input_values` are either: float values of log-mel filterbank features extracted from the raw speech waveform, or indices of decoder input sequence tokens in the vocabulary, or hidden states. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_values`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`SpeechT5Decoder._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. 
speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*): Tensor containing the speaker embeddings. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_values=input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # downsample encoder attention mask (only for encoders with speech input) if attention_mask is not None and isinstance(self.encoder, SpeechT5EncoderWithSpeechPrenet): encoder_attention_mask = self.encoder.prenet._get_feature_vector_attention_mask( encoder_outputs[0].shape[1], attention_mask ) else: encoder_attention_mask = attention_mask if isinstance(self.decoder, SpeechT5DecoderWithSpeechPrenet): decoder_args = {"speaker_embeddings": speaker_embeddings} else: decoder_args = {} decoder_outputs = self.decoder( input_values=decoder_input_values, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, **decoder_args, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" SpeechT5 Model with a speech encoder and a text decoder. """ )
SpeechT5Model
python
airbytehq__airbyte
airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py
{ "start": 1678, "end": 1767 }
class ____(MetafieldShopifySubstream): parent_stream_class = Articles
MetafieldArticles
python
pytorch__pytorch
torch/ao/quantization/quantizer/quantizer.py
{ "start": 3642, "end": 4586 }
class ____: """How are input argument or output should be quantized, expressed as QuantizationSpec, this corresponds to how a Tensor in the operator Graph is observed (PTQ) or fake quantized (QAT) """ # a map from torch.fx.Node to a type of QuantizationSpecBase input_qspec_map: dict[Node, QuantizationSpecBase | None] = field( default_factory=dict ) # How the output of this node is quantized, expressed as QuantizationSpec # TODO: change the value to QuantizationSpec in a separate PR output_qspec: QuantizationSpecBase | None = None # For a Node: node1 and edge: (node1, node2), since they are observing the same # Tensor, we may want to implicitly share observers, this flag allows people to # turn off this behavior for the output of the node allow_implicit_sharing: bool = True # whether the node is annotated or not _annotated: bool = False
QuantizationAnnotation
python
pypa__packaging
tests/test_markers.py
{ "start": 1923, "end": 2619 }
class ____: def test_prefers_pep440(self) -> None: assert Marker('"2.7.9" < "foo"').evaluate(dict(foo="2.7.10")) def test_falls_back_to_python(self) -> None: assert Marker('"b" > "a"').evaluate(dict(a="a")) def test_fails_when_undefined(self) -> None: with pytest.raises(UndefinedComparison): Marker("'2.7.0' ~= os_name").evaluate() def test_allows_prerelease(self) -> None: assert Marker('python_full_version > "3.6.2"').evaluate( {"python_full_version": "3.11.0a5"} ) FakeVersionInfo = collections.namedtuple( "FakeVersionInfo", ["major", "minor", "micro", "releaselevel", "serial"] )
TestOperatorEvaluation
python
getsentry__sentry
src/sentry/notifications/notifications/activity/unassigned.py
{ "start": 199, "end": 773 }
class ____(GroupActivityNotification): metrics_key = "unassigned_activity" title = "Unassigned" def get_description(self) -> tuple[str, str | None, Mapping[str, Any]]: return "{author} unassigned {an issue}", None, {} def get_notification_title( self, provider: ExternalProviders, context: Mapping[str, Any] | None = None ) -> str: user = self.user if user: author = user.name or user.email else: author = "Sentry" return f"Issue unassigned by {author}"
UnassignedActivityNotification
python
streamlit__streamlit
lib/tests/streamlit/elements/download_button_test.py
{ "start": 795, "end": 6515 }
class ____(DeltaGeneratorTestCase): """Test ability to marshall download_button protos.""" @parameterized.expand([("hello world",), (b"byteshere",)]) def test_just_label(self, data): """Test that it can be called with label and string or bytes data.""" st.download_button("the label", data=data) c = self.get_delta_from_queue().new_element.download_button assert c.label == "the label" assert c.type == "secondary" assert not c.disabled assert not c.ignore_rerun def test_emoji_icon(self): """Test that it can be called with emoji icon.""" st.download_button("the label", icon="⚡", data="juststring") c = self.get_delta_from_queue().new_element.download_button assert c.icon == "⚡" def test_material_icon(self): """Test that it can be called with material icon.""" st.download_button("the label", icon=":material/thumb_up:", data="juststring") c = self.get_delta_from_queue().new_element.download_button assert c.icon == ":material/thumb_up:" def test_just_disabled(self): """Test that it can be called with disabled param.""" st.download_button("the label", data="juststring", disabled=True) c = self.get_delta_from_queue().new_element.download_button assert c.disabled def test_url_exist(self): """Test that file url exist in proto.""" st.download_button("the label", data="juststring") c = self.get_delta_from_queue().new_element.download_button assert "/media/" in c.url def test_sets_ignore_rerun(self): """Test that it can be called with on_click="ignore".""" st.download_button("the label", data="juststring", on_click="ignore") c = self.get_delta_from_queue().new_element.download_button assert c.ignore_rerun @parameterized.expand(["primary", "secondary", "tertiary"]) def test_type(self, type): """Test that it can be called with type param.""" st.download_button("the label", data="Streamlit", type=type) c = self.get_delta_from_queue().new_element.download_button assert c.type == type def test_shows_cached_widget_replay_warning(self): """Test that a warning is shown when this widget 
is used inside a cached function.""" st.cache_data(lambda: st.download_button("the label", data="juststring"))() # The widget itself is still created, so we need to go back one element more: el = self.get_delta_from_queue(-2).new_element.exception assert el.type == "CachedWidgetWarning" assert el.is_warning def test_callable_data_detected(self): """Test that callable data is properly detected and deferred.""" def generate_data(): return "generated content" st.download_button("Download", data=generate_data) c = self.get_delta_from_queue().new_element.download_button assert c.HasField("deferred_file_id") assert c.deferred_file_id != "" # Value set by runtime assert c.url == "" def test_callable_with_lambda(self): """Test that lambda functions work as callables.""" st.download_button("Download", data=lambda: "lambda content") c = self.get_delta_from_queue().new_element.download_button assert c.HasField("deferred_file_id") assert c.deferred_file_id != "" assert c.url == "" def test_callable_returns_bytes(self): """Test that callable returning bytes is handled correctly.""" st.download_button("Download", data=lambda: b"bytes content") c = self.get_delta_from_queue().new_element.download_button assert c.HasField("deferred_file_id") assert c.deferred_file_id != "" def test_callable_returns_string(self): """Test that callable returning string is handled correctly.""" st.download_button("Download", data=lambda: "string content") c = self.get_delta_from_queue().new_element.download_button assert c.HasField("deferred_file_id") def test_callable_returns_io(self): """Test that callable returning IO object is handled correctly.""" def generate_io(): return io.BytesIO(b"io content") st.download_button("Download", data=generate_io) c = self.get_delta_from_queue().new_element.download_button assert c.HasField("deferred_file_id") def test_callable_with_mime_type(self): """Test that callable with mime type is handled correctly.""" st.download_button("Download CSV", data=lambda: 
"csv,data", mime="text/csv") c = self.get_delta_from_queue().new_element.download_button assert c.HasField("deferred_file_id") assert c.deferred_file_id != "" def test_callable_with_file_name(self): """Test that callable with file_name is handled correctly.""" st.download_button("Download", data=lambda: "content", file_name="output.txt") c = self.get_delta_from_queue().new_element.download_button assert c.HasField("deferred_file_id") def test_non_callable_data_unchanged(self): """Test that non-callable data types still work as before.""" st.download_button("Download String", data="string data") c1 = self.get_delta_from_queue().new_element.download_button assert not c1.HasField("deferred_file_id") assert "/media/" in c1.url st.download_button("Download Bytes", data=b"bytes data") c2 = self.get_delta_from_queue().new_element.download_button assert not c2.HasField("deferred_file_id") assert "/media/" in c2.url
DownloadButtonTest
python
getsentry__sentry
tests/sentry/api/endpoints/test_email_capture.py
{ "start": 225, "end": 1761 }
class ____(APITestCase): def setUp(self) -> None: super().setUp() self.organization = self.create_organization() # demo user self.demo_user = self.create_user() self.demo_om = self.create_member( organization=self.organization, user=self.demo_user, role="member" ) @mock.patch.object(MarketoClient, "submit_form") @override_options({"demo-mode.enabled": True}) def test_capture_endpoint(self, mock_submit_form: mock.MagicMock) -> None: self.login_as(self.demo_user) url = reverse("sentry-demo-mode-email-capture") response = self.client.post(url, {"email": "test123@sentry.io"}) assert response.status_code == 200, response.content mock_submit_form.assert_called_once_with({"email": "test123@sentry.io"}) @override_options({"demo-mode.enabled": False}) def test_capture_endpoint_disabled(self) -> None: self.login_as(self.demo_user) url = reverse("sentry-demo-mode-email-capture") response = self.client.post(url, {"email": "test123@sentry.io"}) assert response.status_code == 404 @override_options({"demo-mode.enabled": True}) def test_capture_endpoint_bad_request(self) -> None: self.login_as(self.demo_user) url = reverse("sentry-demo-mode-email-capture") response = self.client.post(url, {"email": "test123"}) assert response.status_code == 400 assert response.data == {"email": ["Enter a valid email address."]}
EmailCaptureTest
python
huggingface__transformers
tests/utils/test_modeling_utils.py
{ "start": 10900, "end": 11823 }
class ____(PreTrainedModel): def __init__(self, config): super().__init__(config) self.LayerNorm = TestGammaBetaNorm() self.post_init() def forward(self): return self.LayerNorm() TINY_T5 = "patrickvonplaten/t5-tiny-random" TINY_BERT_FOR_TOKEN_CLASSIFICATION = "hf-internal-testing/tiny-bert-for-token-classification" TINY_MISTRAL = "hf-internal-testing/tiny-random-MistralForCausalLM" TINY_IMAGE_CLASSIF = "hf-internal-testing/tiny-random-SiglipForImageClassification" TINY_LLAVA = "hf-internal-testing/tiny-random-LlavaForConditionalGeneration" LOG = logging.get_logger(__name__) def check_models_equal(model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.parameters(), model2.parameters()): if model1_p.data.ne(model2_p.data).sum() > 0: models_are_equal = False return models_are_equal @require_torch
TestModelGammaBeta
python
pytorch__pytorch
torch/distributed/pipelining/stage.py
{ "start": 52612, "end": 64387 }
class ____(_PipelineStageBase): """ A class representing a pipeline stage in a pipeline parallelism setup. PipelineStage assumes sequential partitioning of the model, i.e. the model is split into chunks where outputs from one chunk feed into inputs of the next chunk, with no skip connections. PipelineStage performs runtime shape/dtype inference automatically by propagating the outputs from stage0 to stage1 and so forth, in linear order. To bypass shape inference, pass the `input_args` and `output_args` to each PipelineStage instance. Args: submodule (nn.Module): The PyTorch module wrapped by this stage. stage_index (int): The ID of this stage. num_stages (int): The total number of stages. device (torch.device): The device where this stage is located. input_args (Union[torch.Tensor, Tuple[torch.tensor]], optional): The input arguments for the submodule. output_args (Union[torch.Tensor, Tuple[torch.tensor]], optional): The output arguments for the submodule. group (dist.ProcessGroup, optional): The process group for distributed training. If None, default group. dw_builder (Optional[Callable[[], Callable[..., None]]): If provided, dw_builder will build a new dw_runner function that will the W action (input weights) for F, I, W (Fwd, Input, Weight) zero bubble schedules. """ def __init__( self, submodule: nn.Module, stage_index: int, num_stages: int, device: torch.device, input_args: torch.Tensor | tuple[torch.Tensor, ...] | None = None, output_args: torch.Tensor | tuple[torch.Tensor, ...] | None = None, group: dist.ProcessGroup | None = None, dw_builder: Callable[[], Callable[..., None]] | None = None, ): super().__init__(submodule, stage_index, num_stages, device, group, dw_builder) self.inputs: list[torch.Tensor] | None = None self.inputs_meta: tuple[torch.Tensor, ...] | None = None # Note: inputs and submod should ideally be on meta device. We decided not to assert this (yet) because it # might be breaking for existing users. 
if input_args is None: assert output_args is None, ( "If specifying output_args, input_args must also be specified. " "Otherwise, shape inference will be performed at runtime" ) else: self.inputs_meta = ( (input_args,) if isinstance(input_args, torch.Tensor) else input_args ) if output_args is None: logger.warning( "Deprecation warning: passing input_args and performing init-time shape inference is deprecated. " "PipelineStage now supports runtime shape inference using the real inputs provided to schedule step(). " "Either delete `input_args` arg to `PipelineStage` to opt-into runtime shape inference, " "or additionally pass `output_args` to `PipelineStage` to fully override shape inference. " ) try: with torch.no_grad(): output_args = submodule(*self.inputs_meta) output_args = tree_map_only( torch.Tensor, lambda x: x.to("meta"), output_args ) except Exception as e: raise RuntimeError( "Failed to perform pipeline shape inference- are your inputs on the same device as your module?" ) from e assert output_args is not None, ( "If passing input_args, also pass output_args to override shape inference" ) self._configure_outputs_meta( (output_args,) if isinstance(output_args, torch.Tensor) else output_args ) # these are the buffers used in backwards send/recv, they are allocated later self.outputs_grad: list[torch.Tensor] = [] dbg_str = ( f"Finished pipeline stage init, {self.stage_index=}, {self.is_first=}, " # noqa: G004 f"{self.is_last=}, {self.num_stages=}, " ) if self.inputs_meta is not None: dbg_str += ( f"inputs: {[inp.shape for inp in self.inputs_meta]}, " f"output: {[output.shape for output in self.get_outputs_meta()]}" ) else: dbg_str += " running shape-inference at runtime" logger.debug(dbg_str) def _shape_inference( self, args: tuple[Any, ...], kwargs: dict[str, Any] | None = None, ): if kwargs is None: kwargs = {} assert args is not None, "Args may be an empty tuple but not None" # We skip recv communication if we're the first stage, but also if the previous 
stage is on the same rank # and can pass its output shapes in as args instead of using send/recv. if ( self.is_first # if not first stage, then check if prev stage is on the same rank or self.stage_index_to_group_rank[self.stage_index - 1] == self.group_rank ): logger.debug( "Shape inference: stage %s skipping recv, because shape info passed in via `args`", self.stage_index, ) args = tree_map_only(torch.Tensor, lambda x: x.to("meta"), args) else: assert len(args) == 0, ( "Can't supply input args for shape inference on non-first stage" ) objects = [None] logger.debug( "Shape inference: stage %s receiving from stage %s", self.stage_index, self.stage_index - 1, ) dist.recv_object_list( objects, src=dist.get_global_rank( self.group or dist.distributed_c10d._get_default_group(), self.stage_index_to_group_rank[self.stage_index - 1], ), group=self.group, device=self.device, use_batch=True, ) recv_args = objects[0] assert isinstance(recv_args, tuple), type(recv_args) args = recv_args # cache input shapes for use during recv buffer allocation self.inputs_meta = args args = tree_map_only( torch.Tensor, lambda x: torch.zeros_like(x, device=self.device), args ) # set attributes needed for forward with torch.no_grad(): outputs = self.submod(*args, **kwargs) # if single tensor, convert so it is always a list if isinstance(outputs, torch.Tensor): outputs = [outputs] # communicate meta outputs not real outputs for two reasons # 1 - its faster (esp. since obj coll pickles tensor data!) # 2 - avoid activating a cuda context for the src rank when unpickling on the recv end! outputs_meta = tuple( tree_map_only(torch.Tensor, lambda x: x.to("meta"), outputs) ) logger.debug( "Shape inference: stage %s inputs %s, outputs %s", self.stage_index, self.inputs_meta, outputs_meta, ) self._configure_outputs_meta(outputs_meta) # Passing outputs to the next stage: # two cases- # 1. Usually: use send/recv communication to pass the output # 2. Special case: for V-schedules, 2 'adjacent' stages (e.g. 
stage 3, 4 in an 8-stage 4-rank V) # pass their shape info via return value and function args rather than send/recv. if ( self.is_last # if not last stage, then check if next stage is on the same rank or self.stage_index_to_group_rank[self.stage_index + 1] == self.group_rank ): # Case (2) above: pass shape info via return value and caller passes it as args to next stage's # _shape_inference call logger.debug( "Shape inference: stage %s skipping send to next stage", self.stage_index, ) else: # Case (1): send shapes via send operation, and ensure not to return it to the caller logger.debug( "Shape inference: stage %s sending to stage %s", self.stage_index, self.stage_index + 1, ) dist.send_object_list( [outputs_meta], dst=dist.get_global_rank( self.group or dist.distributed_c10d._get_default_group(), self.stage_index_to_group_rank[self.stage_index + 1], ), group=self.group, device=self.device, use_batch=True, ) outputs_meta = tuple() return outputs_meta def _prepare_forward_infra( self, num_microbatches: int, args: tuple[Any, ...], kwargs: dict[str, Any] | None = None, ) -> tuple[Any, ...]: # TODO move self.device to an argument from step API (from its input tensors)? assert num_microbatches is not None, "TODO fix num_microbatches" outputs: tuple[Any, ...] = tuple() if self.inputs_meta is None: outputs = self._shape_inference(args, kwargs) assert self.inputs_meta is not None # Receive info during forward # TODO: create args_recv_info lazily? 
(same needed for PipelineStage) for chunk_id in range(num_microbatches): if not self.is_first: # We assume that we always receive from stage - 1 recv_infos = tuple( _RecvInfo( f"recv_for_{self.stage_index}_from_{self.stage_index - 1}", self.stage_index - 1, _make_tensor_from_meta(inp, self.device), ) for inp in self.inputs_meta ) # In case there is backward pass, set requires_grad for receive buffers if self.has_backward: for r in recv_infos: r.buffer.requires_grad_(True) self.args_recv_info[chunk_id] = recv_infos else: self.args_recv_info[chunk_id] = tuple( _RootArgPlaceholder(i) for i in self.inputs_meta ) # Send info during forward for each activation # only need the rank that is being sent to self.act_send_info: dict[int, list] = {} for idx in range(len(self.get_outputs_meta())): # We assume we always send to stage + 1 if not self.is_last: self.act_send_info[idx] = [self.stage_index + 1] else: self.act_send_info[idx] = [] return outputs def _create_grad_recv_info( self, act_send_info: dict, ) -> tuple[_RecvInfo, ...]: grad_recv_info: tuple[_RecvInfo, ...] = () if not self.is_last: # Receiving gradients from multiple sources is not supported # hence we only take the first destination grad_recv_info = tuple( _RecvInfo( f"recv_grad_for_{self.stage_index}_from_{dst_list[0]}", dst_list[0], _make_tensor_from_meta(self.get_outputs_meta()[idx], self.device), ) for idx, dst_list in act_send_info.items() ) return grad_recv_info
PipelineStage
python
ray-project__ray
python/ray/experimental/collective/communicator.py
{ "start": 423, "end": 1643 }
class ____: """ A communicator handle used by the driver to store handles to the actors in the communicator. """ def __init__(self, actors: List[ray.actor.ActorHandle], name: str, backend: str): """ Initializes the CommunicatorHandle with the given actor handles. Assumes that the communicator has already been initialized on all actors. Args: actors: A list of actor handles to be stored. name: Name of the communicator. backend: Communicator backend. See ray.util.collective.types for valid values. """ self._actors = actors self._name = name self._backend = Backend(backend) def get_rank(self, actor: ray.actor.ActorHandle): for i, a in enumerate(self._actors): if a == actor: return i return -1 @property def actors(self) -> List[ray.actor.ActorHandle]: """ Return all actor handles in this communicator. """ return self._actors[:] @property def name(self) -> str: return self._name @property def backend(self) -> str: return self._backend
CommunicatorHandle
python
Pylons__pyramid
tests/test_security.py
{ "start": 3441, "end": 4340 }
class ____(unittest.TestCase): def _getTargetClass(self): from pyramid.security import ACLDenied return ACLDenied def _makeOne(self, *arg, **kw): klass = self._getTargetClass() return klass(*arg, **kw) def test_it(self): from pyramid.security import Denied msg = ( "ACLDenied permission 'permission' via ACE 'ace' in ACL 'acl' " "on context 'ctx' for principals 'principals'" ) denied = self._makeOne('ace', 'acl', 'permission', 'principals', 'ctx') self.assertIsInstance(denied, Denied) self.assertTrue(msg in denied.msg) self.assertEqual(denied, False) self.assertFalse(denied) self.assertEqual(str(denied), msg) self.assertTrue('<ACLDenied instance at ' in repr(denied)) self.assertTrue("with msg %r>" % msg in repr(denied))
TestACLDenied
python
mlflow__mlflow
dev/clint/src/clint/rules/typing_extensions.py
{ "start": 36, "end": 594 }
class ____(Rule): def __init__(self, *, full_name: str, allowlist: list[str]) -> None: self.full_name = full_name self.allowlist = allowlist def _message(self) -> str: return ( f"`{self.full_name}` is not allowed to use. Only {self.allowlist} are allowed. " "You can extend `tool.clint.typing-extensions-allowlist` in `pyproject.toml` if needed " "but make sure that the version requirement for `typing-extensions` is compatible with " "the added types." )
TypingExtensions
python
scipy__scipy
scipy/interpolate/tests/test_rgi.py
{ "start": 33755, "end": 34177 }
class ____: """ Minimal indexable object """ def __init__(self, shape): self.ndim = 2 self.shape = shape self._v = np.arange(np.prod(shape)).reshape(shape) def __getitem__(self, idx): return self._v[idx] def __array_interface__(self): return None def __array__(self, dtype=None, copy=None): raise RuntimeError("No array representation")
MyValue
python
pypa__pip
src/pip/_internal/index/package_finder.py
{ "start": 13892, "end": 21639 }
class ____: """ Responsible for filtering and sorting candidates for installation based on what tags are valid. """ @classmethod def create( cls, project_name: str, target_python: TargetPython | None = None, prefer_binary: bool = False, allow_all_prereleases: bool = False, specifier: specifiers.BaseSpecifier | None = None, hashes: Hashes | None = None, ) -> CandidateEvaluator: """Create a CandidateEvaluator object. :param target_python: The target Python interpreter to use when checking compatibility. If None (the default), a TargetPython object will be constructed from the running Python. :param specifier: An optional object implementing `filter` (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable versions. :param hashes: An optional collection of allowed hashes. """ if target_python is None: target_python = TargetPython() if specifier is None: specifier = specifiers.SpecifierSet() supported_tags = target_python.get_sorted_tags() return cls( project_name=project_name, supported_tags=supported_tags, specifier=specifier, prefer_binary=prefer_binary, allow_all_prereleases=allow_all_prereleases, hashes=hashes, ) def __init__( self, project_name: str, supported_tags: list[Tag], specifier: specifiers.BaseSpecifier, prefer_binary: bool = False, allow_all_prereleases: bool = False, hashes: Hashes | None = None, ) -> None: """ :param supported_tags: The PEP 425 tags supported by the target Python in order of preference (most preferred first). """ self._allow_all_prereleases = allow_all_prereleases self._hashes = hashes self._prefer_binary = prefer_binary self._project_name = project_name self._specifier = specifier self._supported_tags = supported_tags # Since the index of the tag in the _supported_tags list is used # as a priority, precompute a map from tag to index/priority to be # used in wheel.find_most_preferred_tag. 
self._wheel_tag_preferences = { tag: idx for idx, tag in enumerate(supported_tags) } def get_applicable_candidates( self, candidates: list[InstallationCandidate], ) -> list[InstallationCandidate]: """ Return the applicable candidates from a list of candidates. """ # Using None infers from the specifier instead. allow_prereleases = self._allow_all_prereleases or None specifier = self._specifier # When using the pkg_resources backend we turn the version object into # a str here because otherwise when we're debundled but setuptools isn't, # Python will see packaging.version.Version and # pkg_resources._vendor.packaging.version.Version as different # types. This way we'll use a str as a common data interchange # format. If we stop using the pkg_resources provided specifier # and start using our own, we can drop the cast to str(). if select_backend().NAME == "pkg_resources": candidates_and_versions: list[ tuple[InstallationCandidate, str | Version] ] = [(c, str(c.version)) for c in candidates] else: candidates_and_versions = [(c, c.version) for c in candidates] versions = set( specifier.filter( (v for _, v in candidates_and_versions), prereleases=allow_prereleases, ) ) applicable_candidates = [c for c, v in candidates_and_versions if v in versions] filtered_applicable_candidates = filter_unallowed_hashes( candidates=applicable_candidates, hashes=self._hashes, project_name=self._project_name, ) return sorted(filtered_applicable_candidates, key=self._sort_key) def _sort_key(self, candidate: InstallationCandidate) -> CandidateSortingKey: """ Function to pass as the `key` argument to a call to sorted() to sort InstallationCandidates by preference. Returns a tuple such that tuples sorting as greater using Python's default comparison operator are more preferred. The preference is as follows: First and foremost, candidates with allowed (matching) hashes are always preferred over candidates without matching hashes. This is because e.g. 
if the only candidate with an allowed hash is yanked, we still want to use that candidate. Second, excepting hash considerations, candidates that have been yanked (in the sense of PEP 592) are always less preferred than candidates that haven't been yanked. Then: If not finding wheels, they are sorted by version only. If finding wheels, then the sort order is by version, then: 1. existing installs 2. wheels ordered via Wheel.support_index_min(self._supported_tags) 3. source archives If prefer_binary was set, then all wheels are sorted above sources. Note: it was considered to embed this logic into the Link comparison operators, but then different sdist links with the same version, would have to be considered equal """ valid_tags = self._supported_tags support_num = len(valid_tags) build_tag: BuildTag = () binary_preference = 0 link = candidate.link if link.is_wheel: # can raise InvalidWheelFilename wheel = Wheel(link.filename) try: pri = -( wheel.find_most_preferred_tag( valid_tags, self._wheel_tag_preferences ) ) except ValueError: raise UnsupportedWheel( f"{wheel.filename} is not a supported wheel for this platform. It " "can't be sorted." ) if self._prefer_binary: binary_preference = 1 build_tag = wheel.build_tag else: # sdist pri = -(support_num) has_allowed_hash = int(link.is_hash_allowed(self._hashes)) yank_value = -1 * int(link.is_yanked) # -1 for yanked. return ( has_allowed_hash, yank_value, binary_preference, candidate.version, pri, build_tag, ) def sort_best_candidate( self, candidates: list[InstallationCandidate], ) -> InstallationCandidate | None: """ Return the best candidate per the instance's sort order, or None if no candidate is acceptable. """ if not candidates: return None best_candidate = max(candidates, key=self._sort_key) return best_candidate def compute_best_candidate( self, candidates: list[InstallationCandidate], ) -> BestCandidateResult: """ Compute and return a `BestCandidateResult` instance. 
""" applicable_candidates = self.get_applicable_candidates(candidates) best_candidate = self.sort_best_candidate(applicable_candidates) return BestCandidateResult( candidates, applicable_candidates=applicable_candidates, best_candidate=best_candidate, )
CandidateEvaluator
python
dagster-io__dagster
python_modules/dagster/dagster/_core/run_coordinator/queued_run_coordinator.py
{ "start": 3420, "end": 14924 }
class ____(RunCoordinator[T_DagsterInstance], ConfigurableClass): """Enqueues runs via the run storage, to be deqeueued by the Dagster Daemon process. Requires the Dagster Daemon process to be alive in order for runs to be launched. """ def __init__( self, max_concurrent_runs: Optional[int] = None, tag_concurrency_limits: Optional[Sequence[Mapping[str, Any]]] = None, dequeue_interval_seconds: Optional[int] = None, dequeue_use_threads: Optional[bool] = None, dequeue_num_workers: Optional[int] = None, max_user_code_failure_retries: Optional[int] = None, user_code_failure_retry_delay: Optional[int] = None, block_op_concurrency_limited_runs: Optional[Mapping[str, Any]] = None, inst_data: Optional[ConfigurableClassData] = None, ): self._inst_data: Optional[ConfigurableClassData] = check.opt_inst_param( inst_data, "inst_data", ConfigurableClassData ) self._max_concurrent_runs: int = check.opt_int_param( max_concurrent_runs, "max_concurrent_runs", 10 ) check.invariant( self._max_concurrent_runs >= -1, "Negative values other than -1 (which disables the limit) for max_concurrent_runs" " are disallowed.", ) self._tag_concurrency_limits: Sequence[Mapping[str, Any]] = check.opt_list_param( tag_concurrency_limits, "tag_concurrency_limits", ) self._dequeue_interval_seconds: int = check.opt_int_param( dequeue_interval_seconds, "dequeue_interval_seconds", 5 ) self._dequeue_use_threads: bool = check.opt_bool_param( dequeue_use_threads, "dequeue_use_threads", False ) self._dequeue_num_workers: Optional[int] = check.opt_int_param( dequeue_num_workers, "dequeue_num_workers" ) self._max_user_code_failure_retries: int = check.opt_int_param( max_user_code_failure_retries, "max_user_code_failure_retries", 0 ) self._user_code_failure_retry_delay: int = check.opt_int_param( user_code_failure_retry_delay, "user_code_failure_retry_delay", 60 ) self._should_block_op_concurrency_limited_runs: bool = bool( not block_op_concurrency_limited_runs or block_op_concurrency_limited_runs.get("enabled", 
True) ) self._explicitly_enabled_concurrency_run_blocking: bool = bool( block_op_concurrency_limited_runs and block_op_concurrency_limited_runs.get("enabled") ) self._op_concurrency_slot_buffer: int = ( block_op_concurrency_limited_runs.get("op_concurrency_slot_buffer", 0) if block_op_concurrency_limited_runs else 0 ) if self._op_concurrency_slot_buffer: check.invariant( self._should_block_op_concurrency_limited_runs, "op_concurrency_slot_buffer can only be set if block_op_concurrency_limited_runs " "is enabled", ) self._logger = logging.getLogger("dagster.run_coordinator.queued_run_coordinator") super().__init__() @property def inst_data(self) -> Optional[ConfigurableClassData]: return self._inst_data def get_run_queue_config(self) -> RunQueueConfig: return RunQueueConfig( max_concurrent_runs=self._max_concurrent_runs, tag_concurrency_limits=self._tag_concurrency_limits, max_user_code_failure_retries=self._max_user_code_failure_retries, user_code_failure_retry_delay=self._user_code_failure_retry_delay, should_block_op_concurrency_limited_runs=self._should_block_op_concurrency_limited_runs, op_concurrency_slot_buffer=self._op_concurrency_slot_buffer, explicitly_enabled_concurrency_run_blocking=self._explicitly_enabled_concurrency_run_blocking, ) @property def dequeue_interval_seconds(self) -> int: return self._dequeue_interval_seconds @property def dequeue_use_threads(self) -> bool: return self._dequeue_use_threads @property def dequeue_num_workers(self) -> Optional[int]: return self._dequeue_num_workers @property def should_block_op_concurrency_limited_runs(self) -> bool: return self._should_block_op_concurrency_limited_runs @property def op_concurrency_slot_buffer(self) -> int: return self._op_concurrency_slot_buffer @classmethod def config_type(cls) -> UserConfigSchema: return { "max_concurrent_runs": Field( config=IntSource, is_required=False, description=( "The maximum number of runs that are allowed to be in progress at once." " Defaults to 10. 
Set to -1 to disable the limit. Set to 0 to stop any runs" " from launching. Any other negative values are disallowed." ), ), "tag_concurrency_limits": Field( config=Noneable( Array( Shape( { "key": String, "value": Field( ScalarUnion( scalar_type=String, non_scalar_schema=Shape({"applyLimitPerUniqueValue": Bool}), ), is_required=False, ), "limit": Field(int), } ) ) ), is_required=False, description=( "A set of limits that are applied to runs with particular tags. If a value is" " set, the limit is applied to only that key-value pair. If no value is set," " the limit is applied across all values of that key. If the value is set to a" " dict with `applyLimitPerUniqueValue: true`, the limit will apply to the" " number of unique values for that key." ), ), "dequeue_interval_seconds": Field( config=IntSource, is_required=False, description=( "The interval in seconds at which the Dagster Daemon " "should periodically check the run queue for new runs to launch." ), ), "dequeue_use_threads": Field( config=bool, is_required=False, description=( "Whether or not to use threads for concurrency when launching dequeued runs." ), ), "dequeue_num_workers": Field( config=IntSource, is_required=False, description=( "If dequeue_use_threads is true, limit the number of concurrent worker threads." ), ), "max_user_code_failure_retries": Field( config=IntSource, is_required=False, default_value=0, description=( "If there is an error reaching a Dagster gRPC server while dequeuing the run," " how many times to retry the dequeue before failing it. The only run launcher" " that requires the gRPC server to be running is the DefaultRunLauncher, so" " setting this will have no effect unless that run launcher is being used." ), ), "user_code_failure_retry_delay": Field( config=IntSource, is_required=False, default_value=60, description=( "If there is an error reaching a Dagster gRPC server while dequeuing the run," " how long to wait before retrying any runs from that same code location. 
The" " only run launcher that requires the gRPC server to be running is the" " DefaultRunLauncher, so setting this will have no effect unless that run" " launcher is being used." ), ), "block_op_concurrency_limited_runs": Field( { "enabled": Field(Bool, is_required=False), "op_concurrency_slot_buffer": Field( int, is_required=False, description=( "Determines whether or not a run will be dequeued if it consists of ops that " "will all be initially blocked waiting for global op concurrency slots to be " "free." ), ), } ), } @classmethod def from_config_value( cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any] ) -> Self: return cls( inst_data=inst_data, max_concurrent_runs=config_value.get("max_concurrent_runs"), tag_concurrency_limits=config_value.get("tag_concurrency_limits"), dequeue_interval_seconds=config_value.get("dequeue_interval_seconds"), dequeue_use_threads=config_value.get("dequeue_use_threads"), dequeue_num_workers=config_value.get("dequeue_num_workers"), max_user_code_failure_retries=config_value.get("max_user_code_failure_retries"), user_code_failure_retry_delay=config_value.get("user_code_failure_retry_delay"), block_op_concurrency_limited_runs=config_value.get("block_op_concurrency_limited_runs"), ) def submit_run(self, context: SubmitRunContext) -> DagsterRun: dagster_run = context.dagster_run if dagster_run.status == DagsterRunStatus.NOT_STARTED: enqueued_event = DagsterEvent.job_enqueue(dagster_run) self._instance.report_dagster_event(enqueued_event, run_id=dagster_run.run_id) else: # the run was already submitted, this is a no-op self._logger.warning( f"submit_run called for run {dagster_run.run_id} with status " f"{dagster_run.status.value}, skipping enqueue." 
) run = self._instance.get_run_by_id(dagster_run.run_id) if run is None: check.failed(f"Failed to reload run {dagster_run.run_id}") assert run return run def cancel_run(self, run_id: str) -> bool: run = self._instance.get_run_by_id(run_id) if not run: return False # NOTE: possible race condition if the dequeuer acts on this run at the same time # https://github.com/dagster-io/dagster/issues/3323 if run.status == DagsterRunStatus.QUEUED: self._instance.report_run_canceling( run, message="Canceling run from the queue.", ) self._instance.report_run_canceled(run) return True else: return self._instance.run_launcher.terminate(run_id)
QueuedRunCoordinator
python
chroma-core__chroma
chromadb/test/property/test_restart_persist.py
{ "start": 1192, "end": 3347 }
class ____(EmbeddingStateMachineBase): system: System def __init__(self, system: System) -> None: self.system = system client = Client.from_system(system) super().__init__(client) @initialize(collection=collection_persistent_st) # type: ignore @overrides def initialize(self, collection: strategies.Collection): self.client.reset() self.collection = self.client.create_collection( name=collection.name, metadata=collection.metadata, # type: ignore embedding_function=collection.embedding_function, ) self.embedding_function = collection.embedding_function trace("init") self.on_state_change(EmbeddingStateMachineStates.initialize) self.record_set_state = strategies.StateMachineRecordSet( ids=[], metadatas=[], documents=[], embeddings=[] ) @rule() def restart_system(self) -> None: # Simulates restarting chromadb self.system.stop() self.system = System(self.system.settings) self.system.start() self.client.clear_system_cache() self.client = Client.from_system(self.system) self.collection = self.client.get_collection( self.collection.name, embedding_function=self.embedding_function ) @overrides def teardown(self) -> None: super().teardown() # Need to manually stop the system to cleanup resources because we may have created a new system (above rule). # Normally, we wouldn't have to worry about this as the system from the fixture is shared between state machine runs. # (This helps avoid a "too many open files" error.) self.system.stop() def test_restart_persisted_client(sqlite_persistent: System) -> None: # TODO: This test is broken for rust bindings and should be fixed if sqlite_persistent.settings.chroma_api_impl != "chromadb.api.rust.RustBindingsAPI": run_state_machine_as_test( lambda: RestartablePersistedEmbeddingStateMachine(sqlite_persistent), ) # type: ignore
RestartablePersistedEmbeddingStateMachine
python
django__django
tests/dispatch/tests.py
{ "start": 325, "end": 551 }
class ____: def __call__(self, val, **kwargs): return val def a(self, val, **kwargs): return val a_signal = Signal() b_signal = Signal() c_signal = Signal() d_signal = Signal(use_caching=True)
Callable
python
PrefectHQ__prefect
tests/utilities/test_callables.py
{ "start": 24071, "end": 25276 }
class ____: def test_no_error_if_no_variadic_parameter(self): def foo(a, b): pass parameters = {"a": 1, "b": 2} new_params = callables.collapse_variadic_parameters(foo, parameters) assert new_params == parameters def test_no_error_if_variadic_parameter_and_kwargs_provided(self): def foo(a, b, **kwargs): pass parameters = {"a": 1, "b": 2, "c": 3, "d": 4} new_params = callables.collapse_variadic_parameters(foo, parameters) assert new_params == {"a": 1, "b": 2, "kwargs": {"c": 3, "d": 4}} def test_params_unchanged_if_variadic_parameter_and_no_kwargs_provided(self): def foo(a, b, **kwargs): pass parameters = {"a": 1, "b": 2} new_params = callables.collapse_variadic_parameters(foo, parameters) assert new_params == parameters def test_value_error_raised_if_extra_args_but_no_variadic_parameter(self): def foo(a, b): pass parameters = {"a": 1, "b": 2, "kwargs": {"c": 3, "d": 4}} with pytest.raises(ValueError): callables.collapse_variadic_parameters(foo, parameters)
TestCollapseVariadicParameter
python
plotly__plotly.py
plotly/graph_objs/layout/ternary/baxis/_tickformatstop.py
{ "start": 235, "end": 8537 }
class ____(_BaseLayoutHierarchyType): _parent_path_str = "layout.ternary.baxis" _path_str = "layout.ternary.baxis.tickformatstop" _valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"} @property def dtickrange(self): """ range [*min*, *max*], where "min", "max" - dtick values which describe some zoom level, it is possible to omit "min" or "max" value by passing "null" The 'dtickrange' property is an info array that may be specified as: * a list or tuple of 2 elements where: (0) The 'dtickrange[0]' property accepts values of any type (1) The 'dtickrange[1]' property accepts values of any type Returns ------- list """ return self["dtickrange"] @dtickrange.setter def dtickrange(self, val): self["dtickrange"] = val @property def enabled(self): """ Determines whether or not this stop is used. If `false`, this stop is ignored even within its `dtickrange`. The 'enabled' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["enabled"] @enabled.setter def enabled(self, val): self["enabled"] = val @property def name(self): """ When used in a template, named items are created in the output figure in addition to any items the figure already has in this array. You can modify these items in the output figure by making your own item with `templateitemname` matching this `name` alongside your modifications (including `visible: false` or `enabled: false` to hide it). Has no effect outside of a template. The 'name' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["name"] @name.setter def name(self, val): self["name"] = val @property def templateitemname(self): """ Used to refer to a named item in this array in the template. 
Named items from the template will be created even without a matching item in the input figure, but you can modify one by making an item with `templateitemname` matching its `name`, alongside your modifications (including `visible: false` or `enabled: false` to hide it). If there is no template or no matching item, this item will be hidden unless you explicitly show it with `visible: true`. The 'templateitemname' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["templateitemname"] @templateitemname.setter def templateitemname(self, val): self["templateitemname"] = val @property def value(self): """ string - dtickformat for described zoom level, the same as "tickformat" The 'value' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["value"] @value.setter def value(self, val): self["value"] = val @property def _prop_descriptions(self): return """\ dtickrange range [*min*, *max*], where "min", "max" - dtick values which describe some zoom level, it is possible to omit "min" or "max" value by passing "null" enabled Determines whether or not this stop is used. If `false`, this stop is ignored even within its `dtickrange`. name When used in a template, named items are created in the output figure in addition to any items the figure already has in this array. You can modify these items in the output figure by making your own item with `templateitemname` matching this `name` alongside your modifications (including `visible: false` or `enabled: false` to hide it). Has no effect outside of a template. templateitemname Used to refer to a named item in this array in the template. 
Named items from the template will be created even without a matching item in the input figure, but you can modify one by making an item with `templateitemname` matching its `name`, alongside your modifications (including `visible: false` or `enabled: false` to hide it). If there is no template or no matching item, this item will be hidden unless you explicitly show it with `visible: true`. value string - dtickformat for described zoom level, the same as "tickformat" """ def __init__( self, arg=None, dtickrange=None, enabled=None, name=None, templateitemname=None, value=None, **kwargs, ): """ Construct a new Tickformatstop object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.layout.ternary .baxis.Tickformatstop` dtickrange range [*min*, *max*], where "min", "max" - dtick values which describe some zoom level, it is possible to omit "min" or "max" value by passing "null" enabled Determines whether or not this stop is used. If `false`, this stop is ignored even within its `dtickrange`. name When used in a template, named items are created in the output figure in addition to any items the figure already has in this array. You can modify these items in the output figure by making your own item with `templateitemname` matching this `name` alongside your modifications (including `visible: false` or `enabled: false` to hide it). Has no effect outside of a template. templateitemname Used to refer to a named item in this array in the template. Named items from the template will be created even without a matching item in the input figure, but you can modify one by making an item with `templateitemname` matching its `name`, alongside your modifications (including `visible: false` or `enabled: false` to hide it). If there is no template or no matching item, this item will be hidden unless you explicitly show it with `visible: true`. 
value string - dtickformat for described zoom level, the same as "tickformat" Returns ------- Tickformatstop """ super().__init__("tickformatstops") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.layout.ternary.baxis.Tickformatstop constructor must be a dict or an instance of :class:`plotly.graph_objs.layout.ternary.baxis.Tickformatstop`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("dtickrange", arg, dtickrange) self._set_property("enabled", arg, enabled) self._set_property("name", arg, name) self._set_property("templateitemname", arg, templateitemname) self._set_property("value", arg, value) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Tickformatstop
python
airbytehq__airbyte
airbyte-integrations/connectors/source-salesforce/unit_tests/test_availability_strategy.py
{ "start": 328, "end": 2322 }
class ____(TestCase): def setUp(self) -> None: self._stream = Mock(spec=Stream) self._logger = Mock() self._error = HTTPError(response=Mock(spec=Response)) def test_given_status_code_is_not_forbidden_or_bad_request_when_handle_http_error_then_raise_error(self) -> None: availability_strategy = SalesforceAvailabilityStrategy() self._error.response.status_code = 401 with pytest.raises(HTTPError): availability_strategy.handle_http_error(self._stream, self._logger, _NO_SOURCE, self._error) def test_given_status_code_is_forbidden_when_handle_http_error_then_is_not_available_with_reason(self) -> None: availability_strategy = SalesforceAvailabilityStrategy() self._error.response.status_code = 403 self._error.response.json.return_value = [{}] is_available, reason = availability_strategy.handle_http_error(self._stream, self._logger, _NO_SOURCE, self._error) assert not is_available assert reason def test_given_status_code_is_bad_request_when_handle_http_error_then_is_not_available_with_reason(self) -> None: availability_strategy = SalesforceAvailabilityStrategy() self._error.response.status_code = 400 self._error.response.json.return_value = [{}] is_available, reason = availability_strategy.handle_http_error(self._stream, self._logger, _NO_SOURCE, self._error) assert not is_available assert reason def test_given_rate_limited_when_handle_http_error_then_is_available(self) -> None: availability_strategy = SalesforceAvailabilityStrategy() self._error.response.status_code = 400 self._error.response.json.return_value = [{"errorCode": "REQUEST_LIMIT_EXCEEDED"}] is_available, reason = availability_strategy.handle_http_error(self._stream, self._logger, _NO_SOURCE, self._error) assert is_available assert reason is None
SalesforceAvailabilityStrategyTest
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/orm/unitofwork.py
{ "start": 20973, "end": 21576 }
class ____(_PostSortRec): __slots__ = "mapper", "isdelete", "sort_key" def __init__(self, uow, mapper, isdelete): self.mapper = mapper self.isdelete = isdelete self.sort_key = ("PostUpdateAll", mapper._sort_key, isdelete) @util.preload_module("sqlalchemy.orm.persistence") def execute(self, uow): persistence = util.preloaded.orm_persistence states, cols = uow.post_update_states[self.mapper] states = [s for s in states if uow.states[s][0] == self.isdelete] persistence._post_update(self.mapper, states, uow, cols)
_PostUpdateAll
python
great-expectations__great_expectations
great_expectations/data_context/store/database_store_backend.py
{ "start": 710, "end": 15402 }
class ____(StoreBackend): def __init__( # noqa: C901, PLR0912, PLR0913 # FIXME CoP self, table_name, key_columns, fixed_length_key=True, credentials=None, url=None, connection_string=None, engine=None, store_name=None, suppress_store_backend_id=False, manually_initialize_store_backend_id: str = "", **kwargs, ) -> None: super().__init__( fixed_length_key=fixed_length_key, suppress_store_backend_id=suppress_store_backend_id, manually_initialize_store_backend_id=manually_initialize_store_backend_id, store_name=store_name, ) if not sa: raise gx_exceptions.DataContextError( # noqa: TRY003 # FIXME CoP "ModuleNotFoundError: No module named 'sqlalchemy'" ) if not self.fixed_length_key: raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP "DatabaseStoreBackend requires use of a fixed-length-key" ) self._schema_name = None self._credentials = credentials self._connection_string = connection_string self._url = url if engine is not None: if credentials is not None: logger.warning( "Both credentials and engine were provided during initialization of SqlAlchemyExecutionEngine. " # noqa: E501 # FIXME CoP "Ignoring credentials." ) self.engine = engine elif credentials is not None: self.engine = self._build_engine(credentials=credentials, **kwargs) elif connection_string is not None: self.engine = sa.create_engine(connection_string, **kwargs) elif url is not None: parsed_url = make_url(url) self.drivername = parsed_url.drivername self.engine = sa.create_engine(url, **kwargs) else: raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP "Credentials, url, connection_string, or an engine are required for a DatabaseStoreBackend." 
# noqa: E501 # FIXME CoP ) meta = sa.MetaData(schema=self._schema_name) self.key_columns = key_columns # Dynamically construct a SQLAlchemy table with the name and column names we'll use cols = [] for column_ in key_columns: if column_ == "value": raise gx_exceptions.InvalidConfigError( # noqa: TRY003 # FIXME CoP "'value' cannot be used as a key_element name" ) cols.append(sa.Column(column_, sa.String, primary_key=True)) cols.append(sa.Column("value", sa.String)) try: table = sa.Table(table_name, meta, autoload_with=self.engine) # We do a "light" check: if the columns' names match, we will proceed, otherwise, create the table # noqa: E501 # FIXME CoP if {str(col.name).lower() for col in table.columns} != (set(key_columns) | {"value"}): raise gx_exceptions.StoreBackendError( # noqa: TRY003 # FIXME CoP f"Unable to use table {table_name}: it exists, but does not have the expected schema." # noqa: E501 # FIXME CoP ) except sqlalchemy.NoSuchTableError: table = sa.Table(table_name, meta, *cols) try: if self._schema_name: with self.engine.begin() as connection: connection.execute( sa.text(f"CREATE SCHEMA IF NOT EXISTS {self._schema_name};") ) meta.create_all(self.engine) except SQLAlchemyError as e: raise gx_exceptions.StoreBackendError( # noqa: TRY003 # FIXME CoP f"Unable to connect to table {table_name} because of an error. It is possible your table needs to be migrated to a new schema. SqlAlchemyError: {e!s}" # noqa: E501 # FIXME CoP ) self._table = table # Initialize with store_backend_id self._store_backend_id = None self._store_backend_id = self.store_backend_id # Gather the call arguments of the present function (include the "module_name" and add the "class_name"), filter # noqa: E501 # FIXME CoP # out the Falsy values, and set the instance "_config" variable equal to the resulting dictionary. 
# noqa: E501 # FIXME CoP self._config = { "table_name": table_name, "key_columns": key_columns, "fixed_length_key": fixed_length_key, "credentials": credentials, "url": url, "connection_string": connection_string, "engine": engine, "store_name": store_name, "suppress_store_backend_id": suppress_store_backend_id, "manually_initialize_store_backend_id": manually_initialize_store_backend_id, "module_name": self.__class__.__module__, "class_name": self.__class__.__name__, } self._config.update(kwargs) filter_properties_dict(properties=self._config, clean_falsy=True, inplace=True) @property @override def store_backend_id(self) -> str: """ Create a store_backend_id if one does not exist, and return it if it exists Ephemeral store_backend_id for database_store_backend until there is a place to store metadata Returns: store_backend_id which is a UUID(version=4) """ # noqa: E501 # FIXME CoP if not self._store_backend_id: store_id = ( self._manually_initialize_store_backend_id if self._manually_initialize_store_backend_id else str(uuid.uuid4()) ) self._store_backend_id = f"{self.STORE_BACKEND_ID_PREFIX}{store_id}" return self._store_backend_id.replace(self.STORE_BACKEND_ID_PREFIX, "") def _build_engine(self, credentials, **kwargs) -> "sa.engine.Engine": # noqa: UP037 # FIXME CoP """ Using a set of given credentials, constructs an Execution Engine , connecting to a database using a URL or a private key path. 
""" # noqa: E501 # FIXME CoP # Update credentials with anything passed during connection time drivername = credentials.pop("drivername") create_engine_kwargs = kwargs self._schema_name = credentials.pop("schema", None) connect_args = credentials.pop("connect_args", None) if connect_args: create_engine_kwargs["connect_args"] = connect_args if "private_key_path" in credentials: options, create_engine_kwargs = self._get_sqlalchemy_key_pair_auth_url( drivername, credentials ) else: options = get_sqlalchemy_url(drivername, **credentials) self.drivername = drivername engine = sa.create_engine(options, **create_engine_kwargs) return engine @staticmethod def _get_sqlalchemy_key_pair_auth_url(drivername: str, credentials: dict) -> Tuple["URL", Dict]: # type: ignore[name-defined] # noqa F821 """ Utilizing a private key path and a passphrase in a given credentials dictionary, attempts to encode the provided values into a private key. If passphrase is incorrect, this will fail and an exception is raised. Args: drivername(str) - The name of the driver class credentials(dict) - A dictionary of database credentials used to access the database Returns: a tuple consisting of a url with the serialized key-pair authentication, and a dictionary of engine kwargs. 
""" # noqa: E501 # FIXME CoP from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization private_key_path = credentials.pop("private_key_path") private_key_passphrase = credentials.pop("private_key_passphrase") with Path(private_key_path).expanduser().resolve().open(mode="rb") as key: try: p_key = serialization.load_pem_private_key( key.read(), password=private_key_passphrase.encode() if private_key_passphrase else None, backend=default_backend(), ) except ValueError as e: if "incorrect password" in str(e).lower(): raise gx_exceptions.DatasourceKeyPairAuthBadPassphraseError( datasource_name="SqlAlchemyDatasource", message="Decryption of key failed, was the passphrase incorrect?", ) from e else: raise e # noqa: TRY201 # FIXME CoP pkb = p_key.private_bytes( encoding=serialization.Encoding.DER, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption(), ) credentials_driver_name = credentials.pop("drivername", None) create_engine_kwargs = {"connect_args": {"private_key": pkb}} return ( get_sqlalchemy_url(drivername or credentials_driver_name, **credentials), create_engine_kwargs, ) def _get(self, key): # type: ignore[explicit-override] # FIXME sel = ( sa.select(sa.column("value")) .select_from(self._table) .where( sa.and_( *( getattr(self._table.columns, key_col) == val for key_col, val in zip(self.key_columns, key, strict=False) ) ) ) ) try: with self.engine.begin() as connection: row = connection.execute(sel).fetchone()[0] return row except (IndexError, SQLAlchemyError) as e: logger.debug(f"Error fetching value: {e!s}") raise gx_exceptions.StoreError(f"Unable to fetch value for key: {key!s}") # noqa: TRY003 # FIXME CoP @override def _get_all(self) -> list[Any]: raise NotImplementedError @override def _set(self, key, value, allow_update=True, **kwargs) -> None: cols = {k: v for (k, v) in zip(self.key_columns, key, strict=False)} cols["value"] = value if allow_update: if 
self.has_key(key): ins = ( self._table.update() .where(getattr(self._table.columns, self.key_columns[0]) == key[0]) .values(**cols) ) else: ins = self._table.insert().values(**cols) # type: ignore[assignment] # FIXME CoP else: ins = self._table.insert().values(**cols) # type: ignore[assignment] # FIXME CoP try: with self.engine.begin() as connection: connection.execute(ins) except sqlalchemy.IntegrityError as e: if self._get(key) == value: logger.info(f"Key {key!s} already exists with the same value.") else: raise gx_exceptions.StoreBackendError( # noqa: TRY003 # FIXME CoP f"Integrity error {e!s} while trying to store key" ) @override def _move(self) -> None: # type: ignore[override] # FIXME CoP raise NotImplementedError @override def get_url_for_key(self, key, protocol=None) -> str: url = self._convert_engine_and_key_to_url(key) return url def _convert_engine_and_key_to_url(self, key): # SqlAlchemy engine URL is formatted in the following way # postgresql://postgres:password@localhost:5433/work # [engine]://[username]:[password]@[host]:[port]/[db_name] # which contains information like username and password that should not be public # This function changes the formatting to the following: # [engine]://[db_name]/[key] full_url = str(self.engine.url) engine_name = full_url.split("://")[0] db_name = full_url.split("/")[-1] return f"{engine_name}://{db_name}/{key[0]!s}" def _has_key(self, key): # type: ignore[explicit-override] # FIXME sel = ( sa.select(sa.func.count(sa.column("value"))) .select_from(self._table) .where( sa.and_( *( getattr(self._table.columns, key_col) == val for key_col, val in zip(self.key_columns, key, strict=False) ) ) ) ) try: with self.engine.begin() as connection: return connection.execute(sel).fetchone()[0] == 1 except (IndexError, SQLAlchemyError) as e: logger.debug(f"Error checking for value: {e!s}") return False def list_keys(self, prefix=()): # type: ignore[explicit-override] # FIXME columns = [sa.column(col) for col in self.key_columns] 
sel = ( sa.select(*columns) .select_from(self._table) .where( sa.and_( True, *( getattr(self._table.columns, key_col) == val for key_col, val in zip( self.key_columns[: len(prefix)], prefix, strict=False ) ), ) ) ) with self.engine.begin() as connection: row_list: list[sqlalchemy.Row] = connection.execute(sel).fetchall() return [tuple(row) for row in row_list] def remove_key(self, key): # type: ignore[explicit-override] # FIXME delete_statement = self._table.delete().where( sa.and_( *( getattr(self._table.columns, key_col) == val for key_col, val in zip(self.key_columns, key, strict=False) ) ) ) try: with self.engine.begin() as connection: return connection.execute(delete_statement) except SQLAlchemyError as e: raise gx_exceptions.StoreBackendError( # noqa: TRY003 # FIXME CoP f"Unable to delete key: got sqlalchemy error {e!s}" ) @property @override def config(self) -> dict: return self._config
DatabaseStoreBackend
python
keras-team__keras
keras/src/saving/saving_lib_test.py
{ "start": 1675, "end": 2445 }
class ____(MyDense): def build(self, input_shape): self.assets = ASSETS_DATA self.stored_variables = VARIABLES_DATA return super().build(input_shape) def save_assets(self, inner_path): with open(os.path.join(inner_path, "assets.txt"), "w") as f: f.write(self.assets) def save_own_variables(self, store): store["variables"] = self.stored_variables def load_assets(self, inner_path): with open(os.path.join(inner_path, "assets.txt"), "r") as f: text = f.read() self.assets = text def load_own_variables(self, store): self.stored_variables = np.array(store["variables"]) @keras.saving.register_keras_serializable(package="my_custom_package")
LayerWithCustomSaving
python
gwtw__py-sorting
test/insertion_sort_test.py
{ "start": 409, "end": 758 }
class ____(unittest.TestCase, BaseCustomComparisonSortTest, BasePositiveIntegerSortTest, BaseNegativeIntegerSortTest, BaseStringSortTest): def setUp(self): self.sort = insertion_sort.sort if __name__ == '__main__': unittest.main()
InsertionSortTest
python
ray-project__ray
python/ray/data/_internal/execution/operators/union_operator.py
{ "start": 470, "end": 5105 }
class ____(InternalQueueOperatorMixin, NAryOperator): """An operator that combines output blocks from two or more input operators into a single output.""" def __init__( self, data_context: DataContext, *input_ops: PhysicalOperator, ): """Create a UnionOperator. Args: input_ops: Operators generating input data for this operator to union. """ # By default, union does not preserve the order of output blocks. # To preserve the order, configure ExecutionOptions accordingly. self._preserve_order = False # Intermediary buffers used to store blocks from each input dependency. # Only used when `self._prserve_order` is True. self._input_buffers: List[BundleQueue] = [ FIFOBundleQueue() for _ in range(len(input_ops)) ] # The index of the input dependency that is currently the source of # the output buffer. New inputs from this input dependency will be added # directly to the output buffer. Only used when `self._preserve_order` is True. self._input_idx_to_output = 0 self._output_buffer: collections.deque[RefBundle] = collections.deque() self._stats: StatsDict = {"Union": []} super().__init__(data_context, *input_ops) def start(self, options: ExecutionOptions): # Whether to preserve the order of the input data (both the # order of the input operators and the order of the blocks within). 
self._preserve_order = options.preserve_order super().start(options) def num_outputs_total(self) -> Optional[int]: num_outputs = 0 for input_op in self.input_dependencies: input_num_outputs = input_op.num_outputs_total() if input_num_outputs is None: return None num_outputs += input_num_outputs return num_outputs def num_output_rows_total(self) -> Optional[int]: total_rows = 0 for input_op in self.input_dependencies: input_num_rows = input_op.num_output_rows_total() if input_num_rows is None: return None total_rows += input_num_rows return total_rows def internal_input_queue_num_blocks(self) -> int: return sum(q.num_blocks() for q in self._input_buffers) def internal_input_queue_num_bytes(self) -> int: return sum(q.estimate_size_bytes() for q in self._input_buffers) def internal_output_queue_num_blocks(self) -> int: return sum(len(q.blocks) for q in self._output_buffer) def internal_output_queue_num_bytes(self) -> int: return sum(q.size_bytes() for q in self._output_buffer) def clear_internal_input_queue(self) -> None: """Clear internal input queues.""" for input_buffer in self._input_buffers: while input_buffer: bundle = input_buffer.get_next() self._metrics.on_input_dequeued(bundle) def clear_internal_output_queue(self) -> None: """Clear internal output queue.""" while self._output_buffer: bundle = self._output_buffer.popleft() self._metrics.on_output_dequeued(bundle) def _add_input_inner(self, refs: RefBundle, input_index: int) -> None: assert not self.completed() assert 0 <= input_index <= len(self._input_dependencies), input_index if not self._preserve_order: self._output_buffer.append(refs) self._metrics.on_output_queued(refs) else: self._input_buffers[input_index].add(refs) self._metrics.on_input_queued(refs) def all_inputs_done(self) -> None: super().all_inputs_done() if not self._preserve_order: return assert len(self._output_buffer) == 0, len(self._output_buffer) for input_buffer in self._input_buffers: while input_buffer: refs = input_buffer.get_next() 
self._metrics.on_input_dequeued(refs) self._output_buffer.append(refs) self._metrics.on_output_queued(refs) def has_next(self) -> bool: # Check if the output buffer still contains at least one block. return len(self._output_buffer) > 0 def _get_next_inner(self) -> RefBundle: refs = self._output_buffer.popleft() self._metrics.on_output_dequeued(refs) return refs def get_stats(self) -> StatsDict: return self._stats def implements_accurate_memory_accounting(self): return True
UnionOperator
python
pypa__warehouse
tests/unit/admin/views/test_sponsors.py
{ "start": 2921, "end": 3210 }
class ____: def test_list_all_sponsors(self, db_request): SponsorFactory.create_batch(5) sponsors = db_request.db.query(Sponsor).order_by(Sponsor.name).all() result = views.sponsor_list(db_request) assert result == {"sponsors": sponsors}
TestSponsorList
python
walkccc__LeetCode
solutions/2191. Sort the Jumbled Numbers/2191.py
{ "start": 0, "end": 357 }
class ____: def sortJumbled(self, mapping: list[int], nums: list[int]) -> list[int]: def getMapped(num: int) -> int: mapped = [] for c in str(num): mapped.append(str(mapping[int(c)])) return int(''.join(mapped)) A = [(getMapped(num), i, num) for i, num in enumerate(nums)] return [num for _, i, num in sorted(A)]
Solution
python
dask__dask
dask/array/_array_expr/_blockwise.py
{ "start": 7116, "end": 10906 }
class ____(Blockwise): _parameters = ["op", "dtype", "name", "where"] _defaults = { "dtype": None, "name": None, "where": True, } align_arrays = True new_axes: dict = {} adjust_chunks = None concatenate = None @cached_property def _meta(self): return compute_meta( self._info[0], self.dtype, *self.elemwise_args, **self.kwargs ) @property def elemwise_args(self): return self.operands[len(self._parameters) :] @property def out_ind(self): shapes = [] for arg in self.elemwise_args: shape = getattr(arg, "shape", ()) if any(is_dask_collection(x) for x in shape): # Want to exclude Delayed shapes and dd.Scalar shape = () shapes.append(shape) if isinstance(self.where, ArrayExpr): shapes.append(self.where.shape) shapes = [s if isinstance(s, Iterable) else () for s in shapes] out_ndim = len( broadcast_shapes(*shapes) ) # Raises ValueError if dimensions mismatch return tuple(range(out_ndim))[::-1] @cached_property def _info(self): if self.operand("dtype") is not None: need_enforce_dtype = True dtype = self.operand("dtype") else: # We follow NumPy's rules for dtype promotion, which special cases # scalars and 0d ndarrays (which it considers equivalent) by using # their values to compute the result dtype: # https://github.com/numpy/numpy/issues/6240 # We don't inspect the values of 0d dask arrays, because these could # hold potentially very expensive calculations. Instead, we treat # them just like other arrays, and if necessary cast the result of op # to match. 
vals = [ ( np.empty((1,) * max(1, a.ndim), dtype=a.dtype) if not is_scalar_for_elemwise(a) else a ) for a in self.elemwise_args ] try: dtype = apply_infer_dtype( self.op, vals, {}, "elemwise", suggest_dtype=False ) except Exception: raise NotImplementedError need_enforce_dtype = any( not is_scalar_for_elemwise(a) and a.ndim == 0 for a in self.elemwise_args ) blockwise_kwargs = {} op = self.op if self.where is not True: blockwise_kwargs["elemwise_where_function"] = op op = _elemwise_handle_where if need_enforce_dtype: blockwise_kwargs.update( { "enforce_dtype": dtype, "enforce_dtype_function": op, } ) op = _enforce_dtype return op, dtype, blockwise_kwargs @property def func(self): return self._info[0] @property def dtype(self): return self._info[1] @property def kwargs(self): return self._info[2] @property def token(self): return funcname(self.op).strip("_") @property def args(self): # for Blockwise rather than Elemwise return tuple( toolz.concat( ( a, ( tuple(range(a.ndim)[::-1]) if not is_scalar_for_elemwise(a) else None ), ) for a in self.elemwise_args + ([self.where] if self.where is not True else []) ) )
Elemwise
python
redis__redis-py
redis/commands/search/hybrid_query.py
{ "start": 1947, "end": 2032 }
class ____(Enum): KNN = "KNN" RANGE = "RANGE" @experimental
VectorSearchMethods
python
wandb__wandb
wandb/vendor/graphql-core-1.1/wandb_graphql/type/definition.py
{ "start": 1962, "end": 3461 }
class ____(GraphQLType): """Scalar Type Definition The leaf values of any request and input values to arguments are Scalars (or Enums) and are defined with a name and a series of coercion functions used to ensure validity. Example: def coerce_odd(value): if value % 2 == 1: return value return None OddType = GraphQLScalarType(name='Odd', serialize=coerce_odd) """ __slots__ = 'name', 'description', 'serialize', 'parse_value', 'parse_literal' def __init__(self, name, description=None, serialize=None, parse_value=None, parse_literal=None): assert name, 'Type must be named.' assert_valid_name(name) self.name = name self.description = description assert callable(serialize), ( '{} must provide "serialize" function. If this custom Scalar is ' 'also used as an input type, ensure "parse_value" and "parse_literal" ' 'functions are also provided.' ).format(self) if parse_value is not None or parse_literal is not None: assert callable(parse_value) and callable(parse_literal), ( '{} must provide both "parse_value" and "parse_literal" functions.'.format(self) ) self.serialize = serialize self.parse_value = parse_value or none_func self.parse_literal = parse_literal or none_func def __str__(self): return self.name
GraphQLScalarType
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 597866, "end": 598147 }
class ____(sgqlc.types.Interface): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("repository",) repository = sgqlc.types.Field( sgqlc.types.non_null("Repository"), graphql_name="repository" )
RepositoryNode
python
pydantic__pydantic
pydantic-core/tests/benchmarks/nested_schema.py
{ "start": 196, "end": 2801 }
class ____: # __slots__ is not required, but it avoids __pydantic_fields_set__ falling into __dict__ __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__' def schema_using_defs() -> cs.CoreSchema: definitions: list[cs.CoreSchema] = [ {'type': 'int', 'ref': 'int'}, { 'type': 'model', 'cls': MyModel, 'schema': { 'type': 'model-fields', 'fields': { str(c): {'type': 'model-field', 'schema': {'type': 'definition-ref', 'schema_ref': 'int'}} for c in range(N) }, }, 'ref': f'model_{N}', }, ] level = N for level in reversed(range(N)): definitions.append( { 'type': 'model', 'cls': MyModel, 'schema': { 'type': 'model-fields', 'fields': { str(c): { 'type': 'model-field', 'schema': {'type': 'definition-ref', 'schema_ref': f'model_{level + 1}'}, } for c in range(N) }, }, 'ref': f'model_{level}', } ) return { 'type': 'definitions', 'definitions': definitions, 'schema': {'type': 'definition-ref', 'schema_ref': 'model_0'}, } def inlined_schema() -> cs.CoreSchema: level = N schema: cs.CoreSchema = { 'type': 'model', 'cls': MyModel, 'schema': { 'type': 'model-fields', 'fields': {str(c): {'type': 'model-field', 'schema': {'type': 'int'}} for c in range(N)}, }, 'ref': f'model_{N}', } for level in reversed(range(N)): schema = { 'type': 'model', 'cls': MyModel, 'schema': { 'type': 'model-fields', 'fields': {str(c): {'type': 'model-field', 'schema': schema} for c in range(N)}, }, 'ref': f'model_{level}', } return schema def input_data_valid(levels: int = N) -> Any: data = {str(c): 1 for c in range(N)} for _ in range(levels): data = {str(c): data for c in range(N)} return data if __name__ == '__main__': from pydantic_core import SchemaValidator SchemaValidator(schema_using_defs()).validate_python(input_data_valid()) SchemaValidator(inlined_schema()).validate_python(input_data_valid())
MyModel
python
sqlalchemy__sqlalchemy
test/ext/asyncio/test_session.py
{ "start": 1989, "end": 2670 }
class ____(_AsyncFixture, _fixtures.FixtureTest): __requires__ = ("async_dialect",) @classmethod def setup_mappers(cls): cls._setup_stock_mapping() @testing.fixture def async_engine(self): return engines.testing_engine( asyncio=True, options={"sqlite_share_pool": True} ) # TODO: this seems to cause deadlocks in # OverrideSyncSession for some reason # @testing.fixture # def async_engine(self, async_testing_engine): # return async_testing_engine(options={"sqlite_share_pool": True}) @testing.fixture def async_session(self, async_engine): return AsyncSession(async_engine)
AsyncFixture
python
pennersr__django-allauth
allauth/account/managers.py
{ "start": 233, "end": 4251 }
class ____(models.Manager): def can_add_email(self, user): ret = True if app_settings.CHANGE_EMAIL: # We always allow adding an email in this case, regardless of # `MAX_EMAIL_ADDRESSES`, as adding actually adds a temporary email # that the user wants to change to. return True elif app_settings.MAX_EMAIL_ADDRESSES: count = self.filter(user=user).count() ret = count < app_settings.MAX_EMAIL_ADDRESSES return ret def get_new(self, user): """ Returns the email address the user is in the process of changing to, if any. """ return self.filter(user=user, verified=False).order_by("pk").last() def add_new_email( self, request: HttpRequest, user, email: str, send_verification: bool = True ): """ Adds an email address the user wishes to change to, replacing his current email address once confirmed. """ from allauth.account.internal.flows.email_verification import ( send_verification_email_to_address, ) with transaction.atomic(): instance = self.get_new(user) if instance: instance.delete() email = email.lower() instance = self.create(user=user, email=email) if send_verification: send_verification_email_to_address(request, instance) return instance def add_email(self, request, user, email, confirm=False, signup=False): from allauth.account.internal.flows.email_verification import ( send_verification_email_to_address, ) email = email.lower() email_address, created = self.get_or_create( user=user, email=email, defaults={"email": email} ) if created and confirm: send_verification_email_to_address(request, email_address) return email_address def get_verified(self, user): return self.filter(user=user, verified=True).order_by("-primary", "pk").first() def get_primary(self, user): try: return self.get(user=user, primary=True) except self.model.DoesNotExist: return None def get_primary_email(self, user) -> Optional[str]: from allauth.account.utils import user_email primary = self.get_primary(user) if primary: email = primary.email else: email = user_email(user) return email def 
get_users_for(self, email): # this is a list rather than a generator because we probably want to # do a len() on it right away return [ address.user for address in self.filter(verified=True, email=email.lower()) ] def fill_cache_for_user(self, user, addresses): """ In a multi-db setup, inserting records and re-reading them later on may result in not being able to find newly inserted records. Therefore, we maintain a cache for the user so that we can avoid database access when we need to re-read.. """ user._emailaddress_cache = addresses def get_for_user(self, user, email): cache_key = "_emailaddress_cache" addresses = getattr(user, cache_key, None) email = email.lower() if addresses is None: ret = self.get(user=user, email=email.lower()) # To avoid additional lookups when e.g. # EmailAddress.set_as_primary() starts touching self.user ret.user = user return ret else: for address in addresses: if address.email == email: return address raise self.model.DoesNotExist() def is_verified(self, email): return self.filter(email=email.lower(), verified=True).exists() def lookup(self, emails): return self.filter(email__in=[e.lower() for e in emails])
EmailAddressManager
python
keras-team__keras
keras/src/layers/preprocessing/image_preprocessing/max_num_bounding_box_test.py
{ "start": 150, "end": 2740 }
class ____(testing.TestCase): def test_max_num_bounding_boxes_basics(self): self.run_layer_test( layers.MaxNumBoundingBoxes, init_kwargs={ "max_number": 40, "fill_value": -1, }, input_shape=(12, 12, 3), expected_output_shape=(12, 12, 3), expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, expected_num_losses=0, supports_masking=False, run_training_check=False, ) def test_output_shapes(self): if backend.config.image_data_format() == "channels_last": image_shape = (10, 8, 3) else: image_shape = (3, 10, 8) input_image = np.random.random(image_shape) bounding_boxes = { "boxes": np.array( [ [2, 1, 4, 3], [6, 4, 8, 6], ] ), # Example boxes (normalized) "labels": np.array([1, 2]), # Dummy labels } layer = layers.MaxNumBoundingBoxes( max_number=40, bounding_box_format="xyxy" ) input_data = {"images": input_image, "bounding_boxes": bounding_boxes} output = layer(input_data) self.assertAllEqual(output["bounding_boxes"]["boxes"].shape, (40, 4)) self.assertAllEqual(output["bounding_boxes"]["labels"].shape, (40,)) def test_output_shapes_with_tf_data(self): if backend.config.image_data_format() == "channels_last": image_shape = (1, 10, 8, 3) else: image_shape = (1, 3, 10, 8) input_image = np.random.random(image_shape) bounding_boxes = { "boxes": np.array( [ [ [2, 1, 4, 3], [6, 4, 8, 6], ] ] ), # Example boxes (normalized) "labels": np.array([[1, 2]]), # Dummy labels } layer = layers.MaxNumBoundingBoxes( max_number=40, bounding_box_format="xyxy" ) input_data = {"images": input_image, "bounding_boxes": bounding_boxes} ds = tf_data.Dataset.from_tensor_slices(input_data) ds = ds.map(layer) ds = ds.batch(1) output = next(iter(ds)) self.assertAllEqual(output["bounding_boxes"]["boxes"].shape, (1, 40, 4)) self.assertAllEqual(output["bounding_boxes"]["labels"].shape, (1, 40))
MaxNumBoundingBoxesTest
python
django__django
django/contrib/auth/hashers.py
{ "start": 19957, "end": 22409 }
class ____(BasePasswordHasher): """ Secure password hashing using the Scrypt algorithm. """ algorithm = "scrypt" block_size = 8 maxmem = 0 parallelism = 5 work_factor = 2**14 def encode(self, password, salt, n=None, r=None, p=None): self._check_encode_args(password, salt) n = n or self.work_factor r = r or self.block_size p = p or self.parallelism hash_ = hashlib.scrypt( password=force_bytes(password), salt=force_bytes(salt), n=n, r=r, p=p, maxmem=self.maxmem, dklen=64, ) hash_ = base64.b64encode(hash_).decode("ascii").strip() return "%s$%d$%s$%d$%d$%s" % (self.algorithm, n, force_str(salt), r, p, hash_) def decode(self, encoded): algorithm, work_factor, salt, block_size, parallelism, hash_ = encoded.split( "$", 6 ) assert algorithm == self.algorithm return { "algorithm": algorithm, "work_factor": int(work_factor), "salt": salt, "block_size": int(block_size), "parallelism": int(parallelism), "hash": hash_, } def verify(self, password, encoded): decoded = self.decode(encoded) encoded_2 = self.encode( password, decoded["salt"], decoded["work_factor"], decoded["block_size"], decoded["parallelism"], ) return constant_time_compare(encoded, encoded_2) def safe_summary(self, encoded): decoded = self.decode(encoded) return { _("algorithm"): decoded["algorithm"], _("work factor"): decoded["work_factor"], _("block size"): decoded["block_size"], _("parallelism"): decoded["parallelism"], _("salt"): mask_hash(decoded["salt"]), _("hash"): mask_hash(decoded["hash"]), } def must_update(self, encoded): decoded = self.decode(encoded) return ( decoded["work_factor"] != self.work_factor or decoded["block_size"] != self.block_size or decoded["parallelism"] != self.parallelism ) def harden_runtime(self, password, encoded): # The runtime for Scrypt is too complicated to implement a sensible # hardening algorithm. pass
ScryptPasswordHasher
python
sphinx-doc__sphinx
sphinx/addnodes.py
{ "start": 7055, "end": 7635 }
class ____(nodes.Part, nodes.Inline, nodes.FixedTextElement): """Node for a general parameter list. As default the parameter list is written in line with the rest of the signature. Set ``multi_line_parameter_list = True`` to describe a multi-line parameter list. In that case each parameter will then be written on its own, indented line. A trailing comma will be added on the last line if ``multi_line_trailing_comma`` is True. """ child_text_separator = ', ' def astext(self) -> str: return f'({super().astext()})'
desc_parameterlist
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 21210, "end": 21298 }
class ____(str, Enum): LITTLE = "little" BIG = "big" OTHER = "other"
CpuEndian
python
conda__conda
conda/auxlib/entity.py
{ "start": 21151, "end": 22168 }
class ____(Field): _type = frozendict def __init__( self, default=NULL, required=True, validation=None, in_dump=True, default_in_dump=True, nullable=False, immutable=True, aliases=(), ): super().__init__( default, required, validation, in_dump, default_in_dump, nullable, immutable, aliases ) def box(self, instance, instance_type, val): # TODO: really need to make this recursive to make any lists or maps immutable if val is None: return self._type() elif isiterable(val): val = deepfreeze(val) if not isinstance(val, Mapping): raise ValidationError( val, msg=f"Cannot assign a non-iterable value to {self.name}" ) return val else: raise ValidationError( val, msg=f"Cannot assign a non-iterable value to {self.name}" )
MapField
python
huggingface__transformers
tests/models/t5/test_modeling_t5.py
{ "start": 39222, "end": 84489 }
class ____(unittest.TestCase): def tearDown(self): # See LlamaIntegrationTest.tearDown(). Can be removed once LlamaIntegrationTest.tearDown() is removed. cleanup(torch_device, gc_collect=False) @cached_property def model(self): return T5ForConditionalGeneration.from_pretrained("google-t5/t5-base").to(torch_device) @cached_property def tokenizer(self): return T5Tokenizer.from_pretrained("google-t5/t5-base") @slow def test_torch_quant(self): r""" Test that a simple `torch.quantization.quantize_dynamic` call works on a T5 model. """ model_name = "google/flan-t5-small" tokenizer = T5Tokenizer.from_pretrained(model_name) model = T5ForConditionalGeneration.from_pretrained(model_name) model = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8) input_text = "Answer the following yes/no question by reasoning step-by-step. Can you write a whole Haiku in a single tweet?" input_ids = tokenizer(input_text, return_tensors="pt").input_ids _ = model.generate(input_ids) @slow def test_small_generation(self): model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small").to(torch_device) tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") input_ids = tokenizer("summarize: Hello there", return_tensors="pt").input_ids.to(torch_device) sequences = model.generate(input_ids, max_length=8, num_beams=1, do_sample=False) output_str = tokenizer.batch_decode(sequences, skip_special_tokens=True)[0] self.assertTrue(output_str == "Hello there!") @slow def test_small_integration_test(self): """ For comparison run: >>> import t5 # pip install t5==0.7.1 >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary >>> path_to_mtf_small_t5_checkpoint = '<fill_in>' >>> path_to_mtf_small_spm_model_path = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None) >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100) >>> score = t5_model.score(inputs=["Hello 
there"], targets=["Hi I am"], vocabulary=vocab) """ model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small").to(torch_device) tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") input_ids = tokenizer("Hello there", return_tensors="pt").input_ids labels = tokenizer("Hi I am", return_tensors="pt").input_ids loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = Expectations( { (None, None): -19.0845, ("rocm", (9, 4)): -19.0846, } ).get_expectation() self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_v1_1_integration_test(self): """ For comparison run: >>> import t5 # pip install t5==0.7.1 >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary >>> path_to_mtf_small_t5_v1_1_checkpoint = '<fill_in>' >>> path_to_mtf_small_spm_model_path = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1_1_checkpoint, batch_size=1, tpu=None) >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100) >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = T5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small").to(torch_device) tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small") input_ids = tokenizer("Hello there", return_tensors="pt").input_ids labels = tokenizer("Hi I am", return_tensors="pt").input_ids loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -59.0293 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_byt5_integration_test(self): """ For comparison run: >>> import t5 # pip install t5==0.9.1 >>> path_to_byt5_small_checkpoint = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_tf_checkpoint, batch_size=1, tpu=None) >>> vocab = t5.data.ByteVocabulary() >>> score = 
t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = T5ForConditionalGeneration.from_pretrained("google/byt5-small").to(torch_device) tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small") input_ids = tokenizer("Hello there", return_tensors="pt").input_ids labels = tokenizer("Hi I am", return_tensors="pt").input_ids loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -60.7397 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_summarization(self): model = self.model tok = self.tokenizer FRANCE_ARTICLE = ( # @noqa "Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings" " Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane." ' Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation."' ' He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s' " comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French" " Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a" " phone at the wreckage site. The two publications described the supposed video, but did not post it on" " their websites. The publications said that they watched the video, which was found by a source close to" " the investigation. \"One can hear cries of 'My God' in several languages,\" Paris Match reported." ' "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the' " cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the" ' screaming intensifies. Then nothing." 
"It is a very disturbing scene," said Julian Reichelt,' " editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said" " the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman" " in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the" ' reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said,' ' but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be' " sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by" " specialized technicians working hand-in-hand with investigators. But none of the cell phones found so" " far have been sent to the institute, Menichini said. Asked whether staff involved in the search could" ' have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin' ' Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match' ' are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is something' " we did not know before. ... Overall we can say many things of the investigation weren't revealed by the" ' investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline' " Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the" " controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the" ' French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of' ' severe depression," the airline said Tuesday. 
Email correspondence between Lubitz and the school' " discovered in an internal investigation, Lufthansa said, included medical documents he submitted in" " connection with resuming his flight training. The announcement indicates that Lufthansa, the parent" " company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and" " ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%" ' fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was' " sharing the information and documents -- including training and medical records -- with public" " prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the" " past week to recover human remains and plane debris scattered across a steep mountainside. He saw the" " crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash" " site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late" " Tuesday that no visible human remains were left at the site but recovery teams would keep searching." " French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all" " the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini said." " Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew" " on board. Check out the latest from our correspondents . The details about Lubitz's correspondence with" " the flight school during his training were among several developments as investigators continued to" " delve into what caused the crash and Lubitz's possible motive for downing the jet. 
A Lufthansa" " spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his" ' examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in' " Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at" " some point before his aviation career and underwent psychotherapy before he got his pilot's license." " Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting aggressively before the" " crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to" " lose his pilot's license, a European government official briefed on the investigation told CNN on" ' Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being' " considered. Another source, a law enforcement official briefed on the investigation, also told CNN that" " authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would" " not be allowed to fly because of his medical problems. Lubitz's girlfriend told investigators he had" " seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded" " he had psychological issues, the European government official said. But no matter what details emerge" " about his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact' " that maybe they weren't going to keep doing their job and they're upset about that and so they're" ' suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to' " also take that rage and turn it outward on 149 other people who had nothing to do with the person's" ' problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight' " 9525? 
CNN's Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura" " Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine" " Amiel and Anna-Maja Rappard contributed to this report." ) SHORTER_ARTICLE = ( "(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." 
' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. 
CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) IRAN_ARTICLE = ( "(CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran" " in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively" " block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger." " Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli" " Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a" " letter to the Iranian leadership warning them away from a deal. The debate that has already begun since" " the announcement of the new framework will likely result in more heat than light. It will not be helped" " by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: ." " The most misleading assertion, despite universal rejection by experts, is that the negotiations'" " objective at the outset was the total elimination of any nuclear program in Iran. That is the position" " of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it" " had been, there would have been no Iranian team at the negotiating table. Rather, the objective has" " always been to structure an agreement or series of agreements so that Iran could not covertly develop a" " nuclear arsenal before the United States and its allies could respond. The new framework has exceeded" " expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by" " two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another" " dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. 
Despite" " sharp accusations by some in the United States and its allies, Iran denies having such a program, and" " U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's" " continued cooperation with International Atomic Energy Agency inspections is further evidence on this" " point, and we'll know even more about Iran's program in the coming months and years because of the deal." " In fact, the inspections provisions that are part of this agreement are designed to protect against any" " covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that" " the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter" " warning that a deal might be killed by Congress or a future president). This of course is not the case." " The talks were between Iran and the five permanent members of the U.N. Security Council (United States," " United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has" " played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement" " reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran" " and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement" " contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the" " case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased" " or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes" " Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear" " sites both declared and nondeclared. This provision will be permanent. It does not sunset. 
Thus, going" " forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such" " a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the" ' agreement should be a formal treaty requiring the Senate to "advise and consent." But the issue is not' " suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New" " START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement" " with Iran will not be so balanced. The restrictions and obligations in the final framework agreement" " will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove" " most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally" " some insist that any agreement must address Iranian missile programs, human rights violations or support" " for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are" " unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in" " the negotiations would be a poison pill. This agreement should be judged on its merits and on how it" " affects the security of our negotiating partners and allies, including Israel. Those judgments should be" " fact-based, not based on questionable assertions or dubious assumptions." ) ARTICLE_SUBWAY = ( "New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. 
In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." 
) expected_summaries = [ "<pad> " 'prosecutor: "so far no videos were used in the crash investigation" two magazines claim to have found a' " cell phone video of the final seconds . \"one can hear cries of 'My God' in several languages,\" one" " magazine says ." "</s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s>", "<pad> " "the formal accession was marked by a ceremony at The Hague, in the Netherlands . the ICC opened a" " preliminary examination into the situation in the occupied Palestinian territory . as members of the" " court, Palestinians may be subject to counter-charges as well ." "</s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s>", "<pad> " "the u.s. and its negotiating partners reached a very strong framework agreement with Iran . aaron miller:" " the debate that has already begun since the announcement of the new framework will likely result in more" " heat than light . the deal would reduce Iran's low-enriched uranium stockpile, cut centrifuges and" " implement a rigorous inspection regime ." "</s>", "<pad> " "prosecutors say the marriages were part of an immigration scam . if convicted, barrientos faces two" ' criminal counts of "offering a false instrument for filing in the first degree" she has been married 10' " times, with nine of her marriages occurring between 1999 and 2002 ." 
"</s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s>", ] use_task_specific_params(model, "summarization") dct = tok( [model.config.prefix + x for x in [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY]], padding="max_length", truncation=True, max_length=512, return_tensors="pt", ).to(torch_device) self.assertEqual(512, dct["input_ids"].shape[1]) hypotheses_batch = model.generate( **dct, num_beams=4, length_penalty=2.0, max_length=142, min_length=56, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) decoded = tok.batch_decode(hypotheses_batch) self.assertListEqual(expected_summaries, decoded) @slow def test_translation_en_to_de(self): model = self.model tok = self.tokenizer use_task_specific_params(model, "translation_en_to_de") en_text = '"Luigi often said to me that he never wanted the brothers to end up in court", she wrote.' expected_translation = ( '<pad> "Luigi sagte mir oft, dass er nie wollte, dass die Brüder am Gericht sitzen", schrieb sie.</s>' ) input_ids = tok.encode(model.config.prefix + en_text, return_tensors="pt") input_ids = input_ids.to(torch_device) output = model.generate(input_ids) translation = tok.decode(output[0]) self.assertEqual(translation, expected_translation) @slow def test_translation_en_to_fr(self): model = self.model # google-t5/t5-base tok = self.tokenizer use_task_specific_params(model, "translation_en_to_fr") en_text = ( ' This image section from an infrared recording by the Spitzer telescope shows a "family portrait" of' " countless generations of stars: the oldest stars are seen as blue dots. 
" ) input_ids = tok.encode(model.config.prefix + en_text, return_tensors="pt") input_ids = input_ids.to(torch_device) output = model.generate( input_ids=input_ids, num_beams=4, length_penalty=2.0, max_length=100, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) translation = tok.decode(output[0]) new_truncated_translation = ( "<pad> " "Cette section d'images provenant de l'enregistrement infrarouge effectué par le télescope Spitzer montre " "un " "« portrait familial » de générations innombrables d’étoiles : les plus anciennes sont observées " "sous forme " "de points bleus." "</s>" ) self.assertEqual(translation, new_truncated_translation) @slow def test_translation_en_to_ro(self): model = self.model tok = self.tokenizer use_task_specific_params(model, "translation_en_to_ro") en_text = "Taco Bell said it plans to add 2,000 locations in the US by 2022." expected_translation = ( "<pad> Taco Bell a declarat că intenţionează să adauge 2 000 de locaţii în SUA până în 2022.</s>" ) inputs = tok(model.config.prefix + en_text, return_tensors="pt").to(torch_device) output = model.generate(**inputs) translation = tok.decode(output[0]) self.assertEqual(translation, expected_translation) # TODO joao, manuel: remove this in v4.62.0 @slow def test_contrastive_search_t5(self): article = ( " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. 
Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." 
) article = "summarize: " + article.strip() t5_tokenizer = AutoTokenizer.from_pretrained("flax-community/t5-base-cnn-dm") t5_model = T5ForConditionalGeneration.from_pretrained("flax-community/t5-base-cnn-dm").to(torch_device) input_ids = t5_tokenizer( article, add_special_tokens=False, truncation=True, max_length=512, return_tensors="pt" ).input_ids.to(torch_device) outputs = t5_model.generate( input_ids, penalty_alpha=0.5, top_k=5, max_length=64, trust_remote_code=True, custom_generate="transformers-community/contrastive-search", ) generated_text = t5_tokenizer.batch_decode(outputs, skip_special_tokens=True) # TODO: @arthur? # PR #31938 caused regression on this test which was fixed by PR #34089 self.assertListEqual( generated_text, [ "Liana Barrientos has been married 10 times, nine of them in the Bronx . Her husbands filed for " "permanent residence after the marriages, prosecutors say ." ], ) @slow @require_torch_accelerator @pytest.mark.torch_compile_test def test_compile_static_cache(self): NUM_TOKENS_TO_GENERATE = 40 EXPECTED_TEXT_COMPLETION = [ "theory of relativity states that 1) the speed of light is constant in all inertial reference frames. the laws of physics are the same for all inertial reference frames.", "ketchup is my favorite condiment.", ] prompts = [ "summarize: Simply put, the theory of relativity states that 1) the speed of light is constant in all inertial " "reference frames, and 2) the laws of physics are the same for all inertial reference frames.\nThe " "theory of relativity is not hard to grasp.", "summarize: My favorite all time favorite condiment is ketchup. I love it on everything. 
I love it on my eggs, " "my fries, my chicken, my burgers, my hot dogs, my sandwiches, my salads, my pizza.", ] model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small").to(torch_device) tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device) # Dynamic Cache generated_ids = model.generate(**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False) dynamic_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, dynamic_text) # Static Cache generated_ids = model.generate( **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static" ) static_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, static_text) # Static Cache + compile model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True) generated_ids = model.generate( **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static" ) static_compiled_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, static_compiled_text) @slow @require_torch_accelerator @pytest.mark.torch_compile_test def test_compile_static_cache_encoder(self): prompts = [ "summarize: Simply put, the theory of relativity states that 1) the speed of light is constant in all inertial " "reference frames, and 2) the laws of physics are the same for all inertial reference frames.\nThe " "theory of relativity is not hard to grasp.", "summarize: My favorite all time favorite condiment is ketchup. I love it on everything. 
I love it on my eggs, " "my fries, my chicken, my burgers, my hot dogs, my sandwiches, my salads, my pizza.", ] model = T5EncoderModel.from_pretrained("google-t5/t5-small").to(torch_device) tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device) logits = model(**inputs) model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True) logits_compiled = model(**inputs) torch.testing.assert_close(logits[0][:, -3:, -3], logits_compiled[0][:, -3:, -3], rtol=1e-5, atol=1e-5) @pytest.mark.torch_export_test @slow def test_export_encoder(self): """Test exporting T5EncoderModel to torch export format.""" if not is_torch_greater_or_equal_than_2_4: self.skipTest("This test requires torch >= 2.4 to run.") from transformers.integrations.executorch import Seq2SeqLMEncoderExportableModule model_id = "google-t5/t5-small" device = "cpu" example_input_ids = torch.ones((1, 10), dtype=torch.long).to(device) # Load model model = T5EncoderModel.from_pretrained(model_id).to(device=device).eval() # Get original output for comparison with torch.no_grad(): original_output = model(input_ids=example_input_ids).last_hidden_state encoder_model = Seq2SeqLMEncoderExportableModule(model) # Export the encoder_model with torch.no_grad(): seq_len_dim = torch.export.Dim("sequence_length", max=4096) exported_program = torch.export.export( encoder_model, (example_input_ids,), dynamic_shapes={"input_ids": {1: seq_len_dim}}, strict=True ) # Test the exported model with torch.no_grad(): exported_output = exported_program.module()(example_input_ids) # Verify outputs are close enough self.assertTrue(torch.allclose(original_output, exported_output, atol=1e-5)) @pytest.mark.torch_export_test @slow def test_export_decoder(self): """Test exporting T5 decoder with static cache to torch export format.""" if not is_torch_greater_or_equal_than_2_4: self.skipTest("This test requires torch >= 2.4 to run.") from 
transformers import AutoModelForSeq2SeqLM, T5ForConditionalGeneration from transformers.integrations.executorch import Seq2SeqLMDecoderExportableModuleWithStaticCache model_id = "google-t5/t5-small" # Configuration for static cache batch_size = 1 max_cache_len = 123 device = "cpu" full_model = AutoModelForSeq2SeqLM.from_pretrained(model_id).to(device) self.assertIsInstance(full_model, T5ForConditionalGeneration) decoder_model = ( Seq2SeqLMDecoderExportableModuleWithStaticCache(full_model, max_cache_len, batch_size).to(device).eval() ) # Prepare test inputs example_decoder_input_ids = torch.tensor([[0]], dtype=torch.long) # Start token example_cache_position = torch.tensor([0], dtype=torch.long) # For T5-small, hidden size is 512 example_encoder_hidden_states = torch.zeros((batch_size, 10, 512), dtype=torch.float32) # Export the model with torch.no_grad(): encoder_sequence_length_dim = torch.export.Dim("encoder_sequence_length", max=4096) exported_program = torch.export.export( decoder_model, (example_decoder_input_ids, example_encoder_hidden_states, example_cache_position), dynamic_shapes={ "decoder_input_ids": None, "encoder_hidden_states": {1: encoder_sequence_length_dim}, "cache_position": None, }, strict=True, ) # We won't directly verify outputs here as it's complicated with caching, # but we'll check the export was successful self.assertIsNotNone(exported_program) # Verify cache buffers existence and shapes cache_buffers = [ (name, buffer) for name, buffer in exported_program.named_buffers() if name.startswith("key_cache_") or name.startswith("value_cache_") ] # Verify cache buffers self.assertTrue(len(cache_buffers) > 0, "No cache buffers found in exported model") for name, buffer in cache_buffers: # Verify cache buffers are 3D self.assertEqual(buffer.shape[2], max_cache_len) @pytest.mark.torch_export_test @slow def test_export_t5_summarization(self): """Test composing exported T5 encoder and decoder for summarization.""" if not 
is_torch_greater_or_equal_than_2_4: self.skipTest("This test requires torch >= 2.4 to run.") from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, T5ForConditionalGeneration from transformers.integrations.executorch import Seq2SeqLMExportableModule device = torch_device batch_size = 1 max_cache_length = 1234 max_hidden_seq_length = 5678 model_id = "google-t5/t5-small" tokenizer = AutoTokenizer.from_pretrained(model_id) full_model = AutoModelForSeq2SeqLM.from_pretrained(model_id).to(device).eval() self.assertIsInstance(full_model, T5ForConditionalGeneration) wrapped_model = Seq2SeqLMExportableModule( full_model, batch_size=batch_size, max_hidden_seq_length=max_hidden_seq_length, max_cache_length=max_cache_length, ) exported_t5 = wrapped_model.export() # Test Summarization with Composed Models prompts = [ "summarize: Simply put, the theory of relativity states that 1) the speed of light is constant in all inertial " "reference frames, and 2) the laws of physics are the same for all inertial reference frames.\nThe " "theory of relativity is not hard to grasp." ] input_ids = tokenizer(prompts, return_tensors="pt").input_ids generated_ids = exported_t5.generate(prompt_token_ids=input_ids, max_new_tokens=max_cache_length) generated_summary = tokenizer.decode(generated_ids, skip_special_tokens=True) # Also run original model for comparison original_model = T5ForConditionalGeneration.from_pretrained(model_id).eval() with torch.no_grad(): original_outputs = original_model.generate(input_ids, max_length=50, num_beams=1) original_summary = tokenizer.decode(original_outputs[0], skip_special_tokens=True) # Basic verification that we got a reasonable summary self.assertEqual(generated_summary, original_summary) @require_torch
T5ModelIntegrationTests
python
great-expectations__great_expectations
docs/docusaurus/docs/cloud/alerts/_examples/create_a_custom_action.py
{ "start": 659, "end": 3324 }
class ____(ValidationAction): # </snippet> # 2. Set the `type` attribute to a unique string that identifies the Action. # <snippet name="docs/docusaurus/docs/cloud/alerts/_examples/create_a_custom_action.py - set type"> type: Literal["my_custom_cloud_action"] = "my_custom_cloud_action" # </snippet> # 3. Optional. Add any additional fields your Action requires at runtime. # <snippet name="docs/docusaurus/docs/cloud/alerts/_examples/create_a_custom_action.py - add custom fields"> my_custom_str_field: str jira_api_url: str jira_project_key: str # </snippet> # 4. Override the `run()` method to perform the desired task. # <snippet name="docs/docusaurus/docs/cloud/alerts/_examples/create_a_custom_action.py - override run"> @override def run( self, checkpoint_result: CheckpointResult, action_context: Union[ ActionContext, None ], # Contains results from prior Actions in the same Checkpoint run. ) -> dict: # Domain-specific logic self._do_my_custom_action(checkpoint_result) # Optional. Access custom fields you provide the Action at runtime. extra_context = self.my_custom_str_field jira_api_url = self.jira_api_url jira_project_key = self.jira_project_key # Return information about the Action return { "some": "info", "extra_context": extra_context, "jira_api_url": jira_api_url, "jira_project_key": jira_project_key, } def _do_my_custom_action(self, checkpoint_result: CheckpointResult): # Perform custom logic based on the validation results. 
# from jira import JIRA # Replace with your Jira email address and Jira Personal Access Token (PAT) # jira_email_address = "<YOUR_JIRA_EMAIL_ADDRESS>" # pat = "<YOUR-PERSONAL-ACCESS-TOKEN>" # Create a Jira connection using the JIRA library # jira = JIRA(server=self.jira_api_url, basic_auth=(jira_email_address, pat)) # Issue data (replace with your own data) # issue_data = { # "project": {"key": self.jira_project_key}, # "summary": "New issue created for GX Cloud Validation result", # "description": "This issue has been created based on the result of a Validation in GX Cloud.", # "issuetype": {"name": "Task"}, # } # try: # new_issue = jira.create_issue(fields=issue_data) # except Exception as e: # print("Failed to create issue:", str(e)) ... # </snippet> # </snippet>
MyCustomAction
python
tensorflow__tensorflow
tensorflow/python/autograph/utils/tensor_list_test.py
{ "start": 1173, "end": 3444 }
class ____(test.TestCase): def _shape(self, shape_tuple): return constant(shape_tuple, dtypes.int32) def test_dynamic_list_append(self): l = [] l = tl.dynamic_list_append(l, 1) self.assertListEqual(l, [1]) l = list_ops.empty_tensor_list(self._shape(()), dtypes.int32) l = tl.dynamic_list_append(l, 1) s = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32) self.assertAllEqual(s, [1]) l = tensor_array_ops.TensorArray(dtypes.int32, size=0, dynamic_size=True) l = tl.dynamic_list_append(l, 1) s = l.stack() self.assertAllEqual(s, [1]) l = tl.TensorList(self._shape(()), dtypes.int32) l = tl.dynamic_list_append(l, 1) self.assertAllEqual(l[0], 1) def test_list_append_python(self): with context.eager_mode(): a = constant(3.0) l = tl.TensorList(a.shape, a.dtype) l.append(a) self.assertEqual(l.count().numpy(), 1) l.append(a) self.assertEqual(l.count().numpy(), 2) _ = l.pop() self.assertEqual(l.count().numpy(), 1) a2 = l.pop() self.assertEqual(l.count().numpy(), 0) self.assertEqual(a.numpy(), a2.numpy()) def test_list_index_python(self): with context.eager_mode(): a = constant(3.0) b = constant(2.0) l = tl.TensorList(a.shape, a.dtype) l.append(a) self.assertEqual(l[0].numpy(), a.numpy()) l[0] = ops.convert_to_tensor(b) self.assertEqual(l[0].numpy(), b.numpy()) @test_util.run_deprecated_v1 def test_list_append_tf(self): a = constant(3.0) l = tl.TensorList(a.shape, a.dtype) l.append(a) c1 = l.count() l.append(a) c2 = l.count() _ = l.pop() c3 = l.count() a2 = l.pop() c4 = l.count() c1, c2, c3, c4, a, a2 = self.evaluate([c1, c2, c3, c4, a, a2]) self.assertEqual(c1, 1) self.assertEqual(c2, 2) self.assertEqual(c3, 1) self.assertEqual(c4, 0) self.assertEqual(a, a2) def test_list_index_tf(self): a = constant(3.0) b = constant(2.0) l = tl.TensorList(a.shape, a.dtype) l.append(a) l0 = l[0] l[0] = b l1 = l[0] l0, l1, a, b = self.evaluate([l0, l1, a, b]) self.assertEqual(l0, a) self.assertEqual(l1, b) if __name__ == '__main__': test.main()
TensorListTest
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B021.py
{ "start": 520, "end": 631 }
class ____: f"hello {VARIABLE}!" def foo1(): "hello world!" def foo2(): f"hello {VARIABLE}!"
bar2
python
plotly__plotly.py
tests/test_core/test_graph_objs/test_properties_validated.py
{ "start": 77, "end": 1552 }
class ____(TestCase): def setUp(self): # Construct initial scatter object self.scatter = go.Scatter() self.scatter.name = "Scatter 1" def test_validators_work_attr(self): """ Note: all of the individual validators are tested in `tests/test_plotly_utils/validators`. Here we're just making sure that datatypes make use of validators """ with pytest.raises(ValueError): self.scatter.name = [1, 2, 3] def test_validators_work_item(self): """ Note: all of the individual validators are tested in `tests/test_plotly_utils/validators`. Here we're just making sure that datatypes make use of validators """ with pytest.raises(ValueError): self.scatter["name"] = [1, 2, 3] def test_invalid_attr_assignment(self): with pytest.raises(ValueError): self.scatter.bogus = 87 def test_invalid_item_assignment(self): with pytest.raises(ValueError): self.scatter["bogus"] = 87 def test_invalid_dot_assignment(self): with pytest.raises(ValueError): self.scatter["marker.bogus"] = 87 def test_invalid_tuple_assignment(self): with pytest.raises(ValueError): self.scatter[("marker", "bogus")] = 87 def test_invalid_constructor_kwarg(self): with pytest.raises(ValueError): go.Scatter(bogus=87)
TestPropertyValidation
python
sqlalchemy__sqlalchemy
test/base/test_utils.py
{ "start": 92068, "end": 92429 }
class ____(fixtures.TestBase): def test_simple(self): class A: something = {"foo": 1} class B(A): @classproperty def something(cls): d = dict(super().something) d.update({"bazz": 2}) return d eq_(B.something, {"foo": 1, "bazz": 2})
TestClassProperty
python
google__python-fire
fire/docstrings.py
{ "start": 3088, "end": 3179 }
class ____(enum.Enum): ARGS = 0 RETURNS = 1 YIELDS = 2 RAISES = 3 TYPE = 4
Sections
python
pyca__cryptography
src/cryptography/x509/ocsp.py
{ "start": 577, "end": 662 }
class ____(utils.Enum): HASH = "By Hash" NAME = "By Name"
OCSPResponderEncoding
python
coleifer__peewee
peewee.py
{ "start": 81853, "end": 90286 }
class ____(_WriteQuery): SIMPLE = 0 QUERY = 1 MULTI = 2 class DefaultValuesException(Exception): pass def __init__(self, table, insert=None, columns=None, on_conflict=None, **kwargs): super(Insert, self).__init__(table, **kwargs) self._insert = insert self._columns = columns self._on_conflict = on_conflict self._query_type = None self._as_rowcount = False def where(self, *expressions): raise NotImplementedError('INSERT queries cannot have a WHERE clause.') @Node.copy def as_rowcount(self, _as_rowcount=True): self._as_rowcount = _as_rowcount @Node.copy def on_conflict_ignore(self, ignore=True): self._on_conflict = OnConflict('IGNORE') if ignore else None @Node.copy def on_conflict_replace(self, replace=True): self._on_conflict = OnConflict('REPLACE') if replace else None @Node.copy def on_conflict(self, *args, **kwargs): self._on_conflict = (OnConflict(*args, **kwargs) if (args or kwargs) else None) def _simple_insert(self, ctx): if not self._insert: raise self.DefaultValuesException('Error: no data to insert.') return self._generate_insert((self._insert,), ctx) def get_default_data(self): return {} def get_default_columns(self): if self.table._columns: return [getattr(self.table, col) for col in self.table._columns if col != self.table._primary_key] def _generate_insert(self, insert, ctx): rows_iter = iter(insert) columns = self._columns # Load and organize column defaults (if provided). defaults = self.get_default_data() # First figure out what columns are being inserted (if they weren't # specified explicitly). Resulting columns are normalized and ordered. if not columns: try: row = next(rows_iter) except StopIteration: raise self.DefaultValuesException('Error: no rows to insert.') if not isinstance(row, Mapping): columns = self.get_default_columns() if columns is None: raise ValueError('Bulk insert must specify columns.') else: # Infer column names from the dict of data being inserted. 
accum = [] for column in row: if isinstance(column, basestring): column = getattr(self.table, column) accum.append(column) # Add any columns present in the default data that are not # accounted for by the dictionary of row data. column_set = set(accum) for col in (set(defaults) - column_set): accum.append(col) columns = sorted(accum, key=lambda obj: obj.get_sort_key(ctx)) rows_iter = itertools.chain(iter((row,)), rows_iter) else: clean_columns = [] seen = set() for column in columns: if isinstance(column, basestring): column_obj = getattr(self.table, column) else: column_obj = column clean_columns.append(column_obj) seen.add(column_obj) columns = clean_columns for col in sorted(defaults, key=lambda obj: obj.get_sort_key(ctx)): if col not in seen: columns.append(col) fk_fields = set() nullable_columns = set() value_lookups = {} for column in columns: lookups = [column, column.name] if isinstance(column, Field): if column.name != column.column_name: lookups.append(column.column_name) if column.null: nullable_columns.add(column) if isinstance(column, ForeignKeyField): fk_fields.add(column) value_lookups[column] = lookups ctx.sql(EnclosedNodeList(columns)).literal(' VALUES ') columns_converters = [ (column, column.db_value if isinstance(column, Field) else None) for column in columns] all_values = [] for row in rows_iter: values = [] is_dict = isinstance(row, Mapping) for i, (column, converter) in enumerate(columns_converters): try: if is_dict: # The logic is a bit convoluted, but in order to be # flexible in what we accept (dict keyed by # column/field, field name, or underlying column name), # we try accessing the row data dict using each # possible key. If no match is found, throw an error. 
for lookup in value_lookups[column]: try: val = row[lookup] except KeyError: pass else: break else: raise KeyError else: val = row[i] except (KeyError, IndexError): if column in defaults: val = defaults[column] if callable_(val): val = val() elif column in nullable_columns: val = None else: raise ValueError('Missing value for %s.' % column.name) if not isinstance(val, Node) or (isinstance(val, Model) and column in fk_fields): val = Value(val, converter=converter, unpack=False) values.append(val) all_values.append(EnclosedNodeList(values)) if not all_values: raise self.DefaultValuesException('Error: no data to insert.') with ctx.scope_values(subquery=True): return ctx.sql(CommaNodeList(all_values)) def _query_insert(self, ctx): return (ctx .sql(EnclosedNodeList(self._columns)) .literal(' ') .sql(self._insert)) def _default_values(self, ctx): if not self._database: return ctx.literal('DEFAULT VALUES') return self._database.default_values_insert(ctx) def __sql__(self, ctx): super(Insert, self).__sql__(ctx) with ctx.scope_values(): stmt = None if self._on_conflict is not None: stmt = self._on_conflict.get_conflict_statement(ctx, self) (ctx .sql(stmt or SQL('INSERT')) .literal(' INTO ') .sql(self.table) .literal(' ')) if isinstance(self._insert, Mapping) and not self._columns: try: self._simple_insert(ctx) except self.DefaultValuesException: self._default_values(ctx) self._query_type = Insert.SIMPLE elif isinstance(self._insert, (SelectQuery, SQL)): self._query_insert(ctx) self._query_type = Insert.QUERY else: self._generate_insert(self._insert, ctx) self._query_type = Insert.MULTI if self._on_conflict is not None: update = self._on_conflict.get_conflict_update(ctx, self) if update is not None: ctx.literal(' ').sql(update) return self.apply_returning(ctx) def _execute(self, database): if self._returning is None and database.returning_clause \ and self.table._primary_key: self._returning = (self.table._primary_key,) try: return super(Insert, self)._execute(database) 
except self.DefaultValuesException: pass def handle_result(self, database, cursor): if self._return_cursor: return cursor if self._as_rowcount: return database.rows_affected(cursor) return database.last_insert_id(cursor, self._query_type)
Insert
python
python-markdown__markdown
tests/test_apis.py
{ "start": 11890, "end": 13533 }
class ____(unittest.TestCase): """ Test Error Reporting. """ def setUp(self): # Set warnings to be raised as errors warnings.simplefilter('error') def tearDown(self): # Reset warning behavior back to default warnings.simplefilter('default') def testBadOutputFormat(self): """ Test failure on bad output_format. """ self.assertRaises(KeyError, markdown.Markdown, output_format='invalid') def testLoadExtensionFailure(self): """ Test failure of an extension to load. """ self.assertRaises( ImportError, markdown.Markdown, extensions=['non_existant_ext'] ) def testLoadBadExtension(self): """ Test loading of an Extension with no makeExtension function. """ self.assertRaises(AttributeError, markdown.Markdown, extensions=['markdown.util']) def testNonExtension(self): """ Test loading a non Extension object as an extension. """ self.assertRaises(TypeError, markdown.Markdown, extensions=[object]) def testDotNotationExtensionWithBadClass(self): """ Test Extension loading with non-existent class name (`path.to.module:Class`). """ self.assertRaises( AttributeError, markdown.Markdown, extensions=['markdown.extensions.footnotes:MissingExtension'] ) def testBaseExtention(self): """ Test that the base Extension class will raise `NotImplemented`. """ self.assertRaises( NotImplementedError, markdown.Markdown, extensions=[markdown.extensions.Extension()] )
TestErrors
python
dask__dask
dask/dataframe/dask_expr/_reductions.py
{ "start": 44810, "end": 45119 }
class ____(Reduction): @functools.cached_property def _meta(self): return make_meta(bool) reduction_chunk = methods.monotonic_increasing_chunk reduction_combine = methods.monotonic_increasing_combine reduction_aggregate = methods.monotonic_increasing_aggregate
IsMonotonicIncreasing
python
ray-project__ray
python/ray/autoscaler/v2/schema.py
{ "start": 3417, "end": 4246 }
class ____(ResourceDemand): # Details string (parsed into below information) details: str # Placement group's id. pg_id: Optional[str] = None # Strategy, e.g. STRICT_SPREAD strategy: Optional[str] = None # Placement group's state, e.g. PENDING state: Optional[str] = None def __post_init__(self): if not self.details: return # Details in the format of <pg_id>:<strategy>|<state>, parse # it into the above fields. pattern = r"^.*:.*\|.*$" match = re.match(pattern, self.details) if not match: return pg_id, details = self.details.split(":") strategy, state = details.split("|") self.pg_id = pg_id self.strategy = strategy self.state = state @dataclass
PlacementGroupResourceDemand
python
coleifer__peewee
tests/postgres.py
{ "start": 17308, "end": 20368 }
class ____(ModelTestCase): database = db requires = [FTSModel] messages = [ 'A faith is a necessity to a man. Woe to him who believes in nothing.', 'All who call on God in true faith, earnestly from the heart, will ' 'certainly be heard, and will receive what they have asked and desired.', 'Be faithful in small things because it is in them that your strength lies.', 'Faith consists in believing when it is beyond the power of reason to believe.', 'Faith has to do with things that are not seen and hope with things that are not at hand.', ] def setUp(self): super(TestTSVectorField, self).setUp() for idx, message in enumerate(self.messages): FTSModel.create(title=str(idx), data=message, fts_data=fn.to_tsvector(message)) def assertMessages(self, expr, expected): query = FTSModel.select().where(expr).order_by(FTSModel.id) titles = [row.title for row in query] self.assertEqual(list(map(int, titles)), expected) def test_sql(self): query = FTSModel.select().where(Match(FTSModel.data, 'foo bar')) self.assertSQL(query, ( 'SELECT "t1"."id", "t1"."title", "t1"."data", "t1"."fts_data" ' 'FROM "fts_model" AS "t1" ' 'WHERE (to_tsvector("t1"."data") @@ to_tsquery(?))'), ['foo bar']) def test_match_function(self): D = FTSModel.data self.assertMessages(Match(D, 'heart'), [1]) self.assertMessages(Match(D, 'god'), [1]) self.assertMessages(Match(D, 'faith'), [0, 1, 2, 3, 4]) self.assertMessages(Match(D, 'thing'), [2, 4]) self.assertMessages(Match(D, 'faith & things'), [2, 4]) self.assertMessages(Match(D, 'god | things'), [1, 2, 4]) self.assertMessages(Match(D, 'god & things'), []) def test_tsvector_field(self): M = FTSModel.fts_data.match self.assertMessages(M('heart'), [1]) self.assertMessages(M('god'), [1]) self.assertMessages(M('faith'), [0, 1, 2, 3, 4]) self.assertMessages(M('thing'), [2, 4]) self.assertMessages(M('faith & things'), [2, 4]) self.assertMessages(M('god | things'), [1, 2, 4]) self.assertMessages(M('god & things'), []) # Using the plain parser we cannot express "OR", 
but individual term # match works like we expect and multi-term is AND-ed together. self.assertMessages(M('god | things', plain=True), []) self.assertMessages(M('god', plain=True), [1]) self.assertMessages(M('thing', plain=True), [2, 4]) self.assertMessages(M('faith things', plain=True), [2, 4]) def pg93(): with db: return db.connection().server_version >= 90300 def pg10(): with db: return db.connection().server_version >= 100000 def pg12(): with db: return db.connection().server_version >= 120000 JSON_SUPPORT = (JsonModel is not None) and pg93() @skip_unless(JSON_SUPPORT, 'json support unavailable')
TestTSVectorField
python
allegroai__clearml
clearml/backend_api/services/v2_23/tasks.py
{ "start": 322391, "end": 328176 }
class ____(Request): """ Enqueue tasks :param ids: IDs of the tasks to enqueue :type ids: Sequence[str] :param status_reason: Reason for status change :type status_reason: str :param status_message: Extra information regarding status change :type status_message: str :param queue: Queue id. If not provided and no queue name is passed then tasks are added to the default queue. :type queue: str :param validate_tasks: If set then tasks are validated before enqueue :type validate_tasks: bool :param queue_name: The name of the queue. If the queue does not exist then it is auto-created. Cannot be used together with the queue id :type queue_name: str :param verify_watched_queue: If passed then check wheter there are any workers watiching the queue :type verify_watched_queue: bool """ _service = "tasks" _action = "enqueue_many" _version = "2.23" _schema = { "definitions": {}, "properties": { "ids": { "description": "IDs of the tasks to enqueue", "items": {"type": "string"}, "type": "array", }, "queue": { "description": ( "Queue id. If not provided and no queue name is passed then tasks are added to the default queue." ), "type": "string", }, "queue_name": { "description": ( "The name of the queue. If the queue does not exist then it is auto-created. 
Cannot be used " "together with the queue id" ), "type": "string", }, "status_message": { "description": "Extra information regarding status change", "type": "string", }, "status_reason": { "description": "Reason for status change", "type": "string", }, "validate_tasks": { "default": False, "description": "If set then tasks are validated before enqueue", "type": "boolean", }, "verify_watched_queue": { "default": False, "description": "If passed then check wheter there are any workers watiching the queue", "type": "boolean", }, }, "required": ["ids"], "type": "object", } def __init__( self, ids, status_reason=None, status_message=None, queue=None, validate_tasks=False, queue_name=None, verify_watched_queue=False, **kwargs ): super(EnqueueManyRequest, self).__init__(**kwargs) self.ids = ids self.status_reason = status_reason self.status_message = status_message self.queue = queue self.validate_tasks = validate_tasks self.queue_name = queue_name self.verify_watched_queue = verify_watched_queue @schema_property("ids") def ids(self): return self._property_ids @ids.setter def ids(self, value): if value is None: self._property_ids = None return self.assert_isinstance(value, "ids", (list, tuple)) self.assert_isinstance(value, "ids", six.string_types, is_array=True) self._property_ids = value @schema_property("status_reason") def status_reason(self): return self._property_status_reason @status_reason.setter def status_reason(self, value): if value is None: self._property_status_reason = None return self.assert_isinstance(value, "status_reason", six.string_types) self._property_status_reason = value @schema_property("status_message") def status_message(self): return self._property_status_message @status_message.setter def status_message(self, value): if value is None: self._property_status_message = None return self.assert_isinstance(value, "status_message", six.string_types) self._property_status_message = value @schema_property("queue") def queue(self): return 
self._property_queue @queue.setter def queue(self, value): if value is None: self._property_queue = None return self.assert_isinstance(value, "queue", six.string_types) self._property_queue = value @schema_property("validate_tasks") def validate_tasks(self): return self._property_validate_tasks @validate_tasks.setter def validate_tasks(self, value): if value is None: self._property_validate_tasks = None return self.assert_isinstance(value, "validate_tasks", (bool,)) self._property_validate_tasks = value @schema_property("queue_name") def queue_name(self): return self._property_queue_name @queue_name.setter def queue_name(self, value): if value is None: self._property_queue_name = None return self.assert_isinstance(value, "queue_name", six.string_types) self._property_queue_name = value @schema_property("verify_watched_queue") def verify_watched_queue(self): return self._property_verify_watched_queue @verify_watched_queue.setter def verify_watched_queue(self, value): if value is None: self._property_verify_watched_queue = None return self.assert_isinstance(value, "verify_watched_queue", (bool,)) self._property_verify_watched_queue = value
EnqueueManyRequest
python
tensorflow__tensorflow
tensorflow/python/ops/lookup_ops.py
{ "start": 14793, "end": 16728 }
class ____(StaticHashTable): """A generic hash table that is immutable once initialized. When running in graph mode, you must evaluate the tensor returned by `tf.tables_initializer()` before evaluating the tensor returned by this class's `lookup()` method. Example usage in graph mode: ```python keys_tensor = tf.constant([1, 2]) vals_tensor = tf.constant([3, 4]) input_tensor = tf.constant([1, 5]) table = tf.lookup.StaticHashTable( tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), -1) out = table.lookup(input_tensor) with tf.Session() as sess: sess.run(tf.tables_initializer()) print(sess.run(out)) ``` Note that in graph mode if you set `experimental_is_anonymous` to `True`, you should only call `Session.run` once, otherwise each `Session.run` will create (and destroy) a new table unrelated to each other, leading to errors such as "Table not initialized". You can do so like this: ```python keys_tensor = tf.constant([1, 2]) vals_tensor = tf.constant([3, 4]) input_tensor = tf.constant([1, 5]) table = tf.lookup.StaticHashTable( tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), -1, experimental_is_anonymous=True) with tf.control_dependencies([tf.tables_initializer()]): out = table.lookup(input_tensor) with tf.Session() as sess: print(sess.run(out)) ``` In eager mode, no special code is needed to initialize the table. Example usage in eager mode: ```python tf.enable_eager_execution() keys_tensor = tf.constant([1, 2]) vals_tensor = tf.constant([3, 4]) input_tensor = tf.constant([1, 5]) table = tf.lookup.StaticHashTable( tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), -1) print(table.lookup(input_tensor)) ``` """ @property def initializer(self): return self._init_op # For backwards compatibility. This will be removed in TF 2.0.
StaticHashTableV1
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/schedules/__init__.py
{ "start": 1174, "end": 1338 }
class ____(graphene.Enum): RUNNING = "RUNNING" STOPPED = "STOPPED" ENDED = "ENDED" class Meta: name = "ScheduleStatus"
GrapheneScheduleStatus
python
pallets__jinja
src/jinja2/ext.py
{ "start": 8147, "end": 20565 }
class ____(Extension):
    """This extension adds gettext support to Jinja."""

    tags = {"trans"}

    # TODO: the i18n extension is currently reevaluating values in a few
    # situations. Take this example:
    # {% trans count=something() %}{{ count }} foo{% pluralize
    # %}{{ count }} fooss{% endtrans %}
    # something is called twice here. One time for the gettext value and
    # the other time for the n-parameter of the ngettext function.

    def __init__(self, environment: Environment) -> None:
        super().__init__(environment)

        # Make `_` available in templates as an alias for gettext.
        environment.globals["_"] = _gettext_alias
        environment.extend(
            install_gettext_translations=self._install,
            install_null_translations=self._install_null,
            install_gettext_callables=self._install_callables,
            uninstall_gettext_translations=self._uninstall,
            extract_translations=self._extract,
            newstyle_gettext=False,
        )

    def _install(
        self, translations: "_SupportedTranslations", newstyle: bool | None = None
    ) -> None:
        """Install the callables from a translations object on the environment."""
        # ugettext and ungettext are preferred in case the I18N library
        # is providing compatibility with older Python versions.
        gettext = getattr(translations, "ugettext", None)

        if gettext is None:
            gettext = translations.gettext

        ngettext = getattr(translations, "ungettext", None)

        if ngettext is None:
            ngettext = translations.ngettext

        # pgettext/npgettext (context-aware variants) are optional on the
        # translations object; install them only if present.
        pgettext = getattr(translations, "pgettext", None)
        npgettext = getattr(translations, "npgettext", None)
        self._install_callables(
            gettext, ngettext, newstyle=newstyle, pgettext=pgettext, npgettext=npgettext
        )

    def _install_null(self, newstyle: bool | None = None) -> None:
        """Install no-op translations (identity gettext) on the environment."""
        import gettext

        translations = gettext.NullTranslations()

        self._install_callables(
            gettext=translations.gettext,
            ngettext=translations.ngettext,
            newstyle=newstyle,
            pgettext=translations.pgettext,
            npgettext=translations.npgettext,
        )

    def _install_callables(
        self,
        gettext: t.Callable[[str], str],
        ngettext: t.Callable[[str, str, int], str],
        newstyle: bool | None = None,
        pgettext: t.Callable[[str, str], str] | None = None,
        npgettext: t.Callable[[str, str, str, int], str] | None = None,
    ) -> None:
        """Register gettext callables as template globals, optionally wrapping
        them for newstyle (kwarg-expanding, autoescape-aware) behavior.
        """
        if newstyle is not None:
            self.environment.newstyle_gettext = newstyle  # type: ignore

        if self.environment.newstyle_gettext:  # type: ignore
            # Wrap each callable so it performs variable expansion and
            # autoescape handling itself.
            gettext = _make_new_gettext(gettext)
            ngettext = _make_new_ngettext(ngettext)

            if pgettext is not None:
                pgettext = _make_new_pgettext(pgettext)

            if npgettext is not None:
                npgettext = _make_new_npgettext(npgettext)

        self.environment.globals.update(
            gettext=gettext, ngettext=ngettext, pgettext=pgettext, npgettext=npgettext
        )

    def _uninstall(self, translations: "_SupportedTranslations") -> None:
        """Remove the previously installed gettext globals."""
        for key in ("gettext", "ngettext", "pgettext", "npgettext"):
            self.environment.globals.pop(key, None)

    def _extract(
        self,
        source: str | nodes.Template,
        gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
    ) -> t.Iterator[tuple[int, str, str | None | tuple[str | None, ...]]]:
        """Yield translatable strings from template source or a parsed AST."""
        if isinstance(source, str):
            source = self.environment.parse(source)

        return extract_from_ast(source, gettext_functions)

    def parse(self, parser: "Parser") -> nodes.Node | list[nodes.Node]:
        """Parse a translatable tag."""
        lineno = next(parser.stream).lineno

        # An optional leading string literal supplies the pgettext context.
        context = None
        context_token = parser.stream.next_if("string")

        if context_token is not None:
            context = context_token.value

        # find all the variables referenced. Additionally a variable can be
        # defined in the body of the trans block too, but this is checked at
        # a later state.
        plural_expr: nodes.Expr | None = None
        plural_expr_assignment: nodes.Assign | None = None
        num_called_num = False
        variables: dict[str, nodes.Expr] = {}
        trimmed = None

        while parser.stream.current.type != "block_end":
            if variables:
                parser.stream.expect("comma")

            # skip colon for python compatibility
            if parser.stream.skip_if("colon"):
                break

            token = parser.stream.expect("name")

            if token.value in variables:
                parser.fail(
                    f"translatable variable {token.value!r} defined twice.",
                    token.lineno,
                    exc=TemplateAssertionError,
                )

            # expressions
            if parser.stream.current.type == "assign":
                next(parser.stream)
                variables[token.value] = var = parser.parse_expression()
            elif trimmed is None and token.value in ("trimmed", "notrimmed"):
                trimmed = token.value == "trimmed"
                continue
            else:
                variables[token.value] = var = nodes.Name(token.value, "load")

            if plural_expr is None:
                if isinstance(var, nodes.Call):
                    # A call expression may not be evaluated twice; store its
                    # result in `_trans` and use that name as the plural expr.
                    plural_expr = nodes.Name("_trans", "load")
                    variables[token.value] = plural_expr
                    plural_expr_assignment = nodes.Assign(
                        nodes.Name("_trans", "store"), var
                    )
                else:
                    plural_expr = var

                num_called_num = token.value == "num"

        parser.stream.expect("block_end")

        plural = None
        have_plural = False
        referenced = set()

        # now parse until endtrans or pluralize
        singular_names, singular = self._parse_block(parser, True)

        if singular_names:
            referenced.update(singular_names)

            if plural_expr is None:
                plural_expr = nodes.Name(singular_names[0], "load")
                num_called_num = singular_names[0] == "num"

        # if we have a pluralize block, we parse that too
        if parser.stream.current.test("name:pluralize"):
            have_plural = True
            next(parser.stream)

            if parser.stream.current.type != "block_end":
                token = parser.stream.expect("name")

                if token.value not in variables:
                    parser.fail(
                        f"unknown variable {token.value!r} for pluralization",
                        token.lineno,
                        exc=TemplateAssertionError,
                    )

                plural_expr = variables[token.value]
                num_called_num = token.value == "num"

            parser.stream.expect("block_end")
            plural_names, plural = self._parse_block(parser, False)
            next(parser.stream)
            referenced.update(plural_names)
        else:
            next(parser.stream)

        # register free names as simple name expressions
        for name in referenced:
            if name not in variables:
                variables[name] = nodes.Name(name, "load")

        if not have_plural:
            plural_expr = None
        elif plural_expr is None:
            parser.fail("pluralize without variables", lineno)

        if trimmed is None:
            # Fall back to the environment-wide policy when the tag did not
            # say `trimmed`/`notrimmed` explicitly.
            trimmed = self.environment.policies["ext.i18n.trimmed"]

        if trimmed:
            singular = self._trim_whitespace(singular)

            if plural:
                plural = self._trim_whitespace(plural)

        node = self._make_node(
            singular,
            plural,
            context,
            variables,
            plural_expr,
            bool(referenced),
            num_called_num and have_plural,
        )
        node.set_lineno(lineno)

        if plural_expr_assignment is not None:
            return [plural_expr_assignment, node]
        else:
            return node

    def _trim_whitespace(self, string: str, _ws_re: t.Pattern[str] = _ws_re) -> str:
        """Collapse internal whitespace runs to single spaces and strip ends."""
        return _ws_re.sub(" ", string.strip())

    def _parse_block(
        self, parser: "Parser", allow_pluralize: bool
    ) -> tuple[list[str], str]:
        """Parse until the next block tag with a given name."""
        referenced = []
        buf = []

        while True:
            if parser.stream.current.type == "data":
                # Literal `%` must be doubled because the collected string is
                # later used as a %-format template.
                buf.append(parser.stream.current.value.replace("%", "%%"))
                next(parser.stream)
            elif parser.stream.current.type == "variable_begin":
                next(parser.stream)
                name = parser.stream.expect("name").value
                referenced.append(name)
                buf.append(f"%({name})s")
                parser.stream.expect("variable_end")
            elif parser.stream.current.type == "block_begin":
                next(parser.stream)
                block_name = (
                    parser.stream.current.value
                    if parser.stream.current.type == "name"
                    else None
                )
                if block_name == "endtrans":
                    break
                elif block_name == "pluralize":
                    if allow_pluralize:
                        break

                    parser.fail(
                        "a translatable section can have only one pluralize section"
                    )
                elif block_name == "trans":
                    parser.fail(
                        "trans blocks can't be nested; did you mean `endtrans`?"
                    )

                parser.fail(
                    f"control structures in translatable sections are not allowed; "
                    f"saw `{block_name}`"
                )
            elif parser.stream.eos:
                parser.fail("unclosed translation block")
            else:
                raise RuntimeError("internal parser error")

        return referenced, concat(buf)

    def _make_node(
        self,
        singular: str,
        plural: str | None,
        context: str | None,
        variables: dict[str, nodes.Expr],
        plural_expr: nodes.Expr | None,
        vars_referenced: bool,
        num_called_num: bool,
    ) -> nodes.Output:
        """Generates a useful node from the data provided."""
        newstyle = self.environment.newstyle_gettext  # type: ignore
        node: nodes.Expr

        # no variables referenced? no need to escape for old style
        # gettext invocations only if there are vars.
        if not vars_referenced and not newstyle:
            singular = singular.replace("%%", "%")

            if plural:
                plural = plural.replace("%%", "%")

        # Select between gettext / pgettext / ngettext / npgettext based on
        # whether a context and/or a plural expression is present.
        func_name = "gettext"
        func_args: list[nodes.Expr] = [nodes.Const(singular)]

        if context is not None:
            func_args.insert(0, nodes.Const(context))
            func_name = f"p{func_name}"

        if plural_expr is not None:
            func_name = f"n{func_name}"
            func_args.extend((nodes.Const(plural), plural_expr))

        node = nodes.Call(nodes.Name(func_name, "load"), func_args, [], None, None)

        # in case newstyle gettext is used, the method is powerful
        # enough to handle the variable expansion and autoescape
        # handling itself
        if newstyle:
            for key, value in variables.items():
                # the function adds that later anyways in case num was
                # called num, so just skip it.
                if num_called_num and key == "num":
                    continue

                node.kwargs.append(nodes.Keyword(key, value))

        # otherwise do that here
        else:
            # mark the return value as safe if we are in an
            # environment with autoescaping turned on
            node = nodes.MarkSafeIfAutoescape(node)

            if variables:
                node = nodes.Mod(
                    node,
                    nodes.Dict(
                        [
                            nodes.Pair(nodes.Const(key), value)
                            for key, value in variables.items()
                        ]
                    ),
                )

        return nodes.Output([node])
InternationalizationExtension
python
bokeh__bokeh
src/bokeh/events.py
{ "start": 24811, "end": 25117 }
class ____(TypedDict):
    """Serialized wire representation of a Bokeh event."""

    type: Literal["event"]  # discriminator tag identifying this payload kind
    name: str  # event name, e.g. the value of an Event subclass's event_name
    values: Any  # event-specific payload; shape depends on the event type

#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------

# Allow "event" payloads to be deserialized back into Event instances.
Deserializer.register("event", Event.from_serializable)
BokehEventRep