Dataset schema (one row per file, 14 columns):

| column              | type        | range / classes                            |
|---------------------|-------------|--------------------------------------------|
| content             | string      | length 1 to 103k                           |
| path                | string      | length 8 to 216                            |
| filename            | string      | length 2 to 179                            |
| language            | string      | 15 classes                                 |
| size_bytes          | int64       | 2 to 189k                                  |
| quality_score       | float64     | 0.5 to 0.95                                |
| complexity          | float64     | 0 to 1                                     |
| documentation_ratio | float64     | 0 to 1                                     |
| repository          | string      | 5 classes                                  |
| stars               | int64       | 0 to 1k                                    |
| created_date        | string date | 2023-07-10 19:21:08 to 2025-07-09 19:11:45 |
| license             | string      | 4 classes                                  |
| is_test             | bool        | 2 classes                                  |
| file_hash           | string      | length 32                                  |
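The columns lend themselves to straightforward filtering. A minimal sketch, assuming the rows are available as a local Parquet file (the file name code_files.parquet is hypothetical):

```python
# Minimal sketch: filter rows of a table with the schema above using pandas.
# "code_files.parquet" is a hypothetical local copy of the dataset.
import pandas as pd

df = pd.read_parquet("code_files.parquet")

# Keep high-quality, documented Python source files, excluding test code.
subset = df[
    (df["language"] == "Python")
    & (df["quality_score"] >= 0.9)
    & (df["documentation_ratio"] > 0)
    & ~df["is_test"]
]
print(subset[["path", "size_bytes", "repository", "license"]].head())
```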
Sample rows. The first 13 rows are compiled bytecode files; they share the path prefix .venv\Lib\site-packages\multiprocess\__pycache__\, language "Other", is_test false, and a whitespace-only content field ("\n\n"):

| filename | size_bytes | quality_score | complexity | documentation_ratio | repository | stars | created_date | license | file_hash |
|---|---|---|---|---|---|---|---|---|---|
| popen_spawn_win32.cpython-313.pyc | 6,388 | 0.8 | 0 | 0.058824 | node-utils | 123 | 2024-01-06T05:10:52.626517 | GPL-3.0 | c078b111f7cf25efb9005c2aa0e6a871 |
| process.cpython-313.pyc | 17,960 | 0.95 | 0.063694 | 0 | react-lib | 109 | 2023-08-01T23:39:33.934561 | BSD-3-Clause | 43d38a0306c9561023f6578dd97c787e |
| queues.cpython-313.pyc | 19,102 | 0.8 | 0 | 0.006993 | react-lib | 540 | 2024-12-23T08:20:48.404964 | MIT | eda4ed87d15eb8cd8f142d26f4a58cf4 |
| reduction.cpython-313.pyc | 14,358 | 0.95 | 0.086538 | 0.019608 | node-utils | 85 | 2024-06-21T13:11:10.826020 | GPL-3.0 | b2d31f6eccc93f774a38e3b025ce59d0 |
| resource_sharer.cpython-313.pyc | 9,110 | 0.8 | 0.057971 | 0 | awesome-app | 140 | 2023-09-06T06:53:03.615570 | BSD-3-Clause | 7a6fa60499be20c969bd80611ab7aac8 |
| resource_tracker.cpython-313.pyc | 11,626 | 0.95 | 0.017857 | 0 | node-utils | 603 | 2025-06-10T21:42:20.979379 | BSD-3-Clause | 099129a5b7bd7a4d7e90e7b3c3ae80b0 |
| sharedctypes.cpython-313.pyc | 10,966 | 0.95 | 0.065421 | 0.038835 | vue-tools | 505 | 2024-02-27T11:21:15.868057 | Apache-2.0 | ef696ff161537d2829d203d93f0fc66d |
| shared_memory.cpython-313.pyc | 23,835 | 0.95 | 0.072816 | 0.015625 | awesome-app | 713 | 2024-11-17T17:06:04.460396 | GPL-3.0 | d7acf239aa7489e8aa39fd9a27fb7dde |
| spawn.cpython-313.pyc | 11,868 | 0.95 | 0.037313 | 0.007937 | awesome-app | 988 | 2025-03-24T12:41:04.844707 | BSD-3-Clause | 711ded486a0a1fc7b889a2a254942e5b |
| synchronize.cpython-313.pyc | 21,809 | 0.95 | 0.011561 | 0.006329 | vue-tools | 628 | 2024-04-18T00:42:46.616204 | BSD-3-Clause | 1ef61408d6e7b99352f626de8912c06a |
| util.cpython-313.pyc | 18,696 | 0.95 | 0.017442 | 0 | vue-tools | 269 | 2025-01-18T04:25:10.166304 | BSD-3-Clause | 265d1a7f0539d06ccb83d49f96d0de16 |
| __info__.cpython-313.pyc | 7,942 | 0.95 | 0.088372 | 0 | react-lib | 318 | 2024-08-20T00:00:19.364583 | Apache-2.0 | daab2fa3756cc8ee6e894da28cca8a90 |
| __init__.cpython-313.pyc | 2,283 | 0.8 | 0 | 0 | vue-tools | 642 | 2024-01-01T22:39:15.545873 | GPL-3.0 | 41e9a2a9f6bf036e24bd413f68070cb2 |

The remaining rows carry real file content; each content blob below (stored as a single escaped string) is followed by its row's metadata.
Copyright (c) 2006-2008, R Oudkerk\n\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n1. Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n3. Neither the name of author nor the names of any contributors may be\n used to endorse or promote products derived from this software\n without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\nOR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\nHOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\nLIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\nOUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGE.\n
path: .venv\Lib\site-packages\multiprocess-0.70.16.dist-info\COPYING
filename: COPYING | language: Other | size_bytes: 1,493 | quality_score: 0.7 | complexity: 0 | documentation_ratio: 0 | repository: node-utils | stars: 4 | created_date: 2024-11-17T06:41:44.381445 | license: Apache-2.0 | is_test: false | file_hash: eb14345c0d5404c670daced4f474ce9c
pip\n
path: .venv\Lib\site-packages\multiprocess-0.70.16.dist-info\INSTALLER
filename: INSTALLER | language: Other | size_bytes: 4 | quality_score: 0.5 | complexity: 0 | documentation_ratio: 0 | repository: awesome-app | stars: 273 | created_date: 2025-04-15T19:07:20.198371 | license: BSD-3-Clause | is_test: false | file_hash: 365c9bfeb7d89244f2ce01c1de44cb85
Copyright (c) 2008-2016 California Institute of Technology.\nCopyright (c) 2016-2024 The Uncertainty Quantification Foundation.\nAll rights reserved.\n\nThis software forks the python package "multiprocessing". Licence and\ncopyright information for multiprocessing can be found in "COPYING".\n\nThis software is available subject to the conditions and terms laid\nout below. By downloading and using this software you are agreeing\nto the following conditions.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n - Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n - Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n - Neither the names of the copyright holders nor the names of any of\n the contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\nTO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\nCONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\nEXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\nPROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\nOR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\nWHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\nOTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\nADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n
path: .venv\Lib\site-packages\multiprocess-0.70.16.dist-info\LICENSE
filename: LICENSE | language: Other | size_bytes: 1,930 | quality_score: 0.7 | complexity: 0.026316 | documentation_ratio: 0 | repository: awesome-app | stars: 596 | created_date: 2023-08-01T01:41:58.706189 | license: BSD-3-Clause | is_test: false | file_hash: 9ccacb3c062cdac1638509a2a3cfa59f
Metadata-Version: 2.1\nName: multiprocess\nVersion: 0.70.16\nSummary: better multiprocessing and multithreading in Python\nHome-page: https://github.com/uqfoundation/multiprocess\nDownload-URL: https://pypi.org/project/multiprocess/#files\nAuthor: Mike McKerns\nAuthor-email: mmckerns@uqfoundation.org\nMaintainer: Mike McKerns\nMaintainer-email: mmckerns@uqfoundation.org\nLicense: BSD-3-Clause\nProject-URL: Documentation, http://multiprocess.rtfd.io\nProject-URL: Source Code, https://github.com/uqfoundation/multiprocess\nProject-URL: Bug Tracker, https://github.com/uqfoundation/multiprocess/issues\nPlatform: Linux\nPlatform: Windows\nPlatform: Mac\nClassifier: Development Status :: 5 - Production/Stable\nClassifier: Intended Audience :: Developers\nClassifier: Intended Audience :: Science/Research\nClassifier: License :: OSI Approved :: BSD License\nClassifier: Programming Language :: Python :: 3\nClassifier: Programming Language :: Python :: 3.8\nClassifier: Programming Language :: Python :: 3.9\nClassifier: Programming Language :: Python :: 3.10\nClassifier: Programming Language :: Python :: 3.11\nClassifier: Programming Language :: Python :: 3.12\nClassifier: Programming Language :: Python :: Implementation :: CPython\nClassifier: Programming Language :: Python :: Implementation :: PyPy\nClassifier: Topic :: Scientific/Engineering\nClassifier: Topic :: Software Development\nRequires-Python: >=3.8\nLicense-File: LICENSE\nLicense-File: COPYING\nRequires-Dist: dill (>=0.3.8)\n\n-----------------------------------------------------------------\nmultiprocess: better multiprocessing and multithreading in Python\n-----------------------------------------------------------------\n\nAbout Multiprocess\n==================\n\n``multiprocess`` is a fork of ``multiprocessing``. ``multiprocess`` extends ``multiprocessing`` to provide enhanced serialization, using `dill`. ``multiprocess`` leverages ``multiprocessing`` to support the spawning of processes using the API of the Python standard library's ``threading`` module. ``multiprocessing`` has been distributed as part of the standard library since Python 2.6.\n\n``multiprocess`` is part of ``pathos``, a Python framework for heterogeneous computing.\n``multiprocess`` is in active development, so any user feedback, bug reports, comments,\nor suggestions are highly appreciated. 
A list of issues is located at https://github.com/uqfoundation/multiprocess/issues, with a legacy list maintained at https://uqfoundation.github.io/project/pathos/query.\n\n\nMajor Features\n==============\n\n``multiprocess`` enables:\n\n - objects to be transferred between processes using pipes or multi-producer/multi-consumer queues\n - objects to be shared between processes using a server process or (for simple data) shared memory\n\n``multiprocess`` provides:\n\n - equivalents of all the synchronization primitives in ``threading``\n - a ``Pool`` class to facilitate submitting tasks to worker processes\n - enhanced serialization, using ``dill``\n\n\nCurrent Release\n===============\n\nThe latest released version of ``multiprocess`` is available from:\n\n https://pypi.org/project/multiprocess\n\n``multiprocess`` is distributed under a 3-clause BSD license, and is a fork of ``multiprocessing``.\n\n\nDevelopment Version\n===================\n\nYou can get the latest development version with all the shiny new features at:\n\n https://github.com/uqfoundation\n\nIf you have a new contribution, please submit a pull request.\n\n\nInstallation\n============\n\n``multiprocess`` can be installed with ``pip``::\n\n $ pip install multiprocess\n\nFor Python 2, a C compiler is required to build the included extension module from source. Python 3 and binary installs do not require a C compiler.\n\n\nRequirements\n============\n\n``multiprocess`` requires:\n\n - ``python`` (or ``pypy``), **>=3.8**\n - ``setuptools``, **>=42**\n - ``dill``, **>=0.3.8**\n\n\nBasic Usage\n===========\n\nThe ``multiprocess.Process`` class follows the API of ``threading.Thread``.\nFor example ::\n\n from multiprocess import Process, Queue\n\n def f(q):\n q.put('hello world')\n\n if __name__ == '__main__':\n q = Queue()\n p = Process(target=f, args=[q])\n p.start()\n print (q.get())\n p.join()\n\nSynchronization primitives like locks, semaphores and conditions are\navailable, for example ::\n\n >>> from multiprocess import Condition\n >>> c = Condition()\n >>> print (c)\n <Condition(<RLock(None, 0)>), 0>\n >>> c.acquire()\n True\n >>> print (c)\n <Condition(<RLock(MainProcess, 1)>), 0>\n\nOne can also use a manager to create shared objects either in shared\nmemory or in a server process, for example ::\n\n >>> from multiprocess import Manager\n >>> manager = Manager()\n >>> l = manager.list(range(10))\n >>> l.reverse()\n >>> print (l)\n [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]\n >>> print (repr(l))\n <Proxy[list] object at 0x00E1B3B0>\n\nTasks can be offloaded to a pool of worker processes in various ways,\nfor example ::\n\n >>> from multiprocess import Pool\n >>> def f(x): return x*x\n ...\n >>> p = Pool(4)\n >>> result = p.map_async(f, range(10))\n >>> print (result.get(timeout=1))\n [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]\n\nWhen ``dill`` is installed, serialization is extended to most objects,\nfor example ::\n\n >>> from multiprocess import Pool\n >>> p = Pool(4)\n >>> print (p.map(lambda x: (lambda y:y**2)(x) + x, range(10)))\n [0, 2, 6, 12, 20, 30, 42, 56, 72, 90]\n\n\nMore Information\n================\n\nProbably the best way to get started is to look at the documentation at\nhttp://multiprocess.rtfd.io. Also see ``multiprocess.tests`` for scripts that\ndemonstrate how ``multiprocess`` can be used to leverage multiple processes\nto execute Python in parallel. You can run the test suite with\n``python -m multiprocess.tests``. 
As ``multiprocess`` conforms to the\n``multiprocessing`` interface, the examples and documentation found at\nhttp://docs.python.org/library/multiprocessing.html also apply to\n``multiprocess`` if one will ``import multiprocessing as multiprocess``.\nSee https://github.com/uqfoundation/multiprocess/tree/master/py3.12/examples\nfor a set of examples that demonstrate some basic use cases and benchmarking\nfor running Python code in parallel. Please feel free to submit a ticket on\ngithub, or ask a question on stackoverflow (**@Mike McKerns**). If you would\nlike to share how you use ``multiprocess`` in your work, please send an email\n(to **mmckerns at uqfoundation dot org**).\n\n\nCitation\n========\n\nIf you use ``multiprocess`` to do research that leads to publication, we ask that you\nacknowledge use of ``multiprocess`` by citing the following in your publication::\n\n M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis,\n "Building a framework for predictive science", Proceedings of\n the 10th Python in Science Conference, 2011;\n http://arxiv.org/pdf/1202.1056\n\n Michael McKerns and Michael Aivazis,\n "pathos: a framework for heterogeneous computing", 2010- ;\n https://uqfoundation.github.io/project/pathos\n\nPlease see https://uqfoundation.github.io/project/pathos or\nhttp://arxiv.org/pdf/1202.1056 for further information.\n
path: .venv\Lib\site-packages\multiprocess-0.70.16.dist-info\METADATA
filename: METADATA | language: Other | size_bytes: 7,151 | quality_score: 0.95 | complexity: 0.08867 | documentation_ratio: 0 | repository: react-lib | stars: 313 | created_date: 2024-08-13T10:30:19.662487 | license: Apache-2.0 | is_test: false | file_hash: cc48c9f225ae64aca9f52f8f2fe7eb88
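The METADATA blob above embeds the multiprocess README. A minimal Python 3 sketch of its dill-backed lambda example, assuming multiprocess (which depends on dill) is installed:

```python
# Minimal Python 3 sketch of the dill-backed serialization described above.
# Assumes: pip install multiprocess  (which installs dill as a dependency).
from multiprocess import Pool

if __name__ == "__main__":
    with Pool(4) as p:
        # The stdlib pickle cannot serialize lambdas; multiprocess uses dill,
        # so anonymous functions can be shipped to worker processes.
        print(p.map(lambda x: x * x + x, range(10)))
        # -> [0, 2, 6, 12, 20, 30, 42, 56, 72, 90]
```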
_multiprocess/__init__.py,sha256=zX5_h36TGSL0brHRtBvCL5E59ccW7yjL79i-Y399ODM,321\n_multiprocess/__pycache__/__init__.cpython-313.pyc,,\nmultiprocess-0.70.16.dist-info/COPYING,sha256=n3_yfLkw0sMgLuB-PS1hRvTeZ20GmjPaMWbJjNuoOpU,1493\nmultiprocess-0.70.16.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4\nmultiprocess-0.70.16.dist-info/LICENSE,sha256=6XUJedJKg2dhI98BD3PMtVtZvRFT-oGczkOr5B4tEEA,1930\nmultiprocess-0.70.16.dist-info/METADATA,sha256=Sv2eH2CjjyjVYaryTKqHkbJTlxlVA-SbmziCgkBJeQ0,7151\nmultiprocess-0.70.16.dist-info/RECORD,,\nmultiprocess-0.70.16.dist-info/WHEEL,sha256=5fgWN-TjI_Xv7EKUndC5Gdk-CmDdjWHfedmCHAp9Yhg,94\nmultiprocess-0.70.16.dist-info/top_level.txt,sha256=qtJc8GNdvi6suNpISX0Myln9AXJBYrNuas1MCqRPPqg,27\nmultiprocess/__info__.py,sha256=84TUBn1oJMNpbVvXKs0lKyfLYaZvRr-ZVh1zHM9VeCY,7997\nmultiprocess/__init__.py,sha256=XWUBDGorUkDW04h64xe51pUV9N5gzvSDj3tNT2ekifw,1856\nmultiprocess/__pycache__/__info__.cpython-313.pyc,,\nmultiprocess/__pycache__/__init__.cpython-313.pyc,,\nmultiprocess/__pycache__/connection.cpython-313.pyc,,\nmultiprocess/__pycache__/context.cpython-313.pyc,,\nmultiprocess/__pycache__/forkserver.cpython-313.pyc,,\nmultiprocess/__pycache__/heap.cpython-313.pyc,,\nmultiprocess/__pycache__/managers.cpython-313.pyc,,\nmultiprocess/__pycache__/pool.cpython-313.pyc,,\nmultiprocess/__pycache__/popen_fork.cpython-313.pyc,,\nmultiprocess/__pycache__/popen_forkserver.cpython-313.pyc,,\nmultiprocess/__pycache__/popen_spawn_posix.cpython-313.pyc,,\nmultiprocess/__pycache__/popen_spawn_win32.cpython-313.pyc,,\nmultiprocess/__pycache__/process.cpython-313.pyc,,\nmultiprocess/__pycache__/queues.cpython-313.pyc,,\nmultiprocess/__pycache__/reduction.cpython-313.pyc,,\nmultiprocess/__pycache__/resource_sharer.cpython-313.pyc,,\nmultiprocess/__pycache__/resource_tracker.cpython-313.pyc,,\nmultiprocess/__pycache__/shared_memory.cpython-313.pyc,,\nmultiprocess/__pycache__/sharedctypes.cpython-313.pyc,,\nmultiprocess/__pycache__/spawn.cpython-313.pyc,,\nmultiprocess/__pycache__/synchronize.cpython-313.pyc,,\nmultiprocess/__pycache__/util.cpython-313.pyc,,\nmultiprocess/connection.py,sha256=-MnrMNG2rA4O-2U5s0Ct_nXi1Q8x1QFTAShwSELFiJ0,41577\nmultiprocess/context.py,sha256=xqunfqOp2vRQqrw-eFq3q4jfwa3GdTrL4Mh1M4AvtgI,11686\nmultiprocess/dummy/__init__.py,sha256=kSekDqD_NCy0FDg7XnxZSgW-Ldg1_iRr07sNwDajKpA,3061\nmultiprocess/dummy/__pycache__/__init__.cpython-313.pyc,,\nmultiprocess/dummy/__pycache__/connection.cpython-313.pyc,,\nmultiprocess/dummy/connection.py,sha256=1j3Rl5_enBM-_kMO6HDmum3kPAoFE4Zs485HV5H-V6s,1598\nmultiprocess/forkserver.py,sha256=_WT6-elIjrr7t8RXoZ6u0RGHgLaqoWweAiqwaWDlUCw,12130\nmultiprocess/heap.py,sha256=9rt5u5m5rkhJNfDWiCLpYDoWIt0LbElmx52yMqk7phQ,11626\nmultiprocess/managers.py,sha256=wzD4jg66U41kwWsqguoDZN0z-JKFbrn0loK4b_fU3GA,47675\nmultiprocess/pool.py,sha256=QYzUHqXDQeZsaXmjgHD29VLLvfBTkHMCK20Vxn5T9IA,32760\nmultiprocess/popen_fork.py,sha256=Nvq5vVId24UfkOQxXhxZbcXuo8d6YMc409yRXAamTd0,2374\nmultiprocess/popen_forkserver.py,sha256=SrEbV8Wv0Uu_UegkaW-cayXRdjTGXr560Yyy90pj-yE,2227\nmultiprocess/popen_spawn_posix.py,sha256=l7XSWqR5UWiUSJh35qeSElLuNfUeEYwvH5HzKRnnyqg,2029\nmultiprocess/popen_spawn_win32.py,sha256=XA9nNjov3-JjmO5ztPE3Ba9zJbR-W4UunYPjm0Y8C98,4353\nmultiprocess/process.py,sha256=WD3nlOajRpvtXuw18J4r7fUKGumP-h7oV21bqhQihEk,12133\nmultiprocess/queues.py,sha256=IyJsaDl3X6RJD8yOuYX6iGU9OXwi4Um6BA3tNhQt1OI,12615\nmultiprocess/reduction.py,sha256=NQQ6KbDhmuAyaDeWaIarTZQokGPhcFda1poNnPm5uNc,9637\nmultiprocess/resource_sharer.py,sha256
=nEApLhMQqd8KunfaNKl3n8vdeiCGPxKrSL1Ja0nNAEk,5132\nmultiprocess/resource_tracker.py,sha256=YX78ClEGXDk1ieCaKRlJ5K1pNZnsvTyz6bBuWmGqZFw,10449\nmultiprocess/shared_memory.py,sha256=UTAecHECIOHElP9Tg6yURCo4pKZiLy65TkASjEXeGus,18458\nmultiprocess/sharedctypes.py,sha256=d-9SKRJHRlJJC331IxEoWOUXIeY9zxCbhWejXOmzGw0,6306\nmultiprocess/spawn.py,sha256=GPe8Ht5UI4wqLSGcPJvo8rcJmDcEK2A_bVNUEyG6ozI,9641\nmultiprocess/synchronize.py,sha256=xMTFX2wwOgbg_bXYgaL49zZAocuraXQ0XG3Uh8Uu7F0,12506\nmultiprocess/tests/__init__.py,sha256=AX00HJqMOEPdte08EFBG_5cpDjscm5XZQpN3bbClAsA,209750\nmultiprocess/tests/__main__.py,sha256=kePVxic_T6xt2jmqlQFlR4ef0ZAGaD4S-od3pXL-pnQ,888\nmultiprocess/tests/__pycache__/__init__.cpython-313.pyc,,\nmultiprocess/tests/__pycache__/__main__.cpython-313.pyc,,\nmultiprocess/tests/__pycache__/mp_fork_bomb.cpython-313.pyc,,\nmultiprocess/tests/__pycache__/mp_preload.cpython-313.pyc,,\nmultiprocess/tests/__pycache__/test_multiprocessing_main_handling.cpython-313.pyc,,\nmultiprocess/tests/mp_fork_bomb.py,sha256=6ADOEzh1aXHZ21aOGoBPhKcgB5sj15G9tQVgSc6GrlY,448\nmultiprocess/tests/mp_preload.py,sha256=1-WvLFMaPoH-vZbpUaJvvZHFxTpA9tgmct2vblQy99M,365\nmultiprocess/tests/test_multiprocessing_fork/__init__.py,sha256=h4YpM8po-3m2kFeoCuV9ZsKDP8UgY-e1Sx3mat9oI9o,829\nmultiprocess/tests/test_multiprocessing_fork/__pycache__/__init__.cpython-313.pyc,,\nmultiprocess/tests/test_multiprocessing_fork/__pycache__/test_manager.cpython-313.pyc,,\nmultiprocess/tests/test_multiprocessing_fork/__pycache__/test_misc.cpython-313.pyc,,\nmultiprocess/tests/test_multiprocessing_fork/__pycache__/test_processes.cpython-313.pyc,,\nmultiprocess/tests/test_multiprocessing_fork/__pycache__/test_threads.cpython-313.pyc,,\nmultiprocess/tests/test_multiprocessing_fork/test_manager.py,sha256=A5K0x43Vrfe2jAVqFkBZpyzfuFoYm_Ickby4z31-xPg,194\nmultiprocess/tests/test_multiprocessing_fork/test_misc.py,sha256=KhsBZTAEng6Dn-KAww-_eFMnnf14lWCXayQ4cMjx9Mk,193\nmultiprocess/tests/test_multiprocessing_fork/test_processes.py,sha256=3bPddzlEnRMkan2cSLfDx77hEydV51Mx-l51ERtwt4Q,196\nmultiprocess/tests/test_multiprocessing_fork/test_threads.py,sha256=_qq2fWmU5FuGtsOSoesXRV3DyCn0ptEezKrsnsE6MqI,194\nmultiprocess/tests/test_multiprocessing_forkserver/__init__.py,sha256=GnMnSAaJ_y557T6vZsqu7C1Z_H1gOjGbXqklkfNz6Wc,738\nmultiprocess/tests/test_multiprocessing_forkserver/__pycache__/__init__.cpython-313.pyc,,\nmultiprocess/tests/test_multiprocessing_forkserver/__pycache__/test_manager.cpython-313.pyc,,\nmultiprocess/tests/test_multiprocessing_forkserver/__pycache__/test_misc.cpython-313.pyc,,\nmultiprocess/tests/test_multiprocessing_forkserver/__pycache__/test_processes.cpython-313.pyc,,\nmultiprocess/tests/test_multiprocessing_forkserver/__pycache__/test_threads.cpython-313.pyc,,\nmultiprocess/tests/test_multiprocessing_forkserver/test_manager.py,sha256=PQTSGlK3U5B2RVaHbW8MJ2pMWeYJj4arKVLm49Fc9TY,200\nmultiprocess/tests/test_multiprocessing_forkserver/test_misc.py,sha256=EbDnh3_2MTNI3fFuC2GjM4pUUJ9cxRWXEwZZBV9FyoQ,199\nmultiprocess/tests/test_multiprocessing_forkserver/test_processes.py,sha256=ks2hMY1_y0ZiyEGqgAK340b2M6oupXksCo3QFCsgTVk,202\nmultiprocess/tests/test_multiprocessing_forkserver/test_threads.py,sha256=H1OyjLOUPkFsnw6QEEp7aSFPLPVndsZX92MAaU_gBFs,200\nmultiprocess/tests/test_multiprocessing_main_handling.py,sha256=gFVw9bMOg8pFZp0QGPIPlXDtrY8K5poeb54PrTGT0Ow,11847\nmultiprocess/tests/test_multiprocessing_spawn/__init__.py,sha256=pkhcDlpFcKmrN7-ekKF8Zz69x_X24JWqdIdiDypwJiY,639\nmultiprocess/tests/test_multiprocessing_spawn/__pycache__/__i
nit__.cpython-313.pyc,,\nmultiprocess/tests/test_multiprocessing_spawn/__pycache__/test_manager.cpython-313.pyc,,\nmultiprocess/tests/test_multiprocessing_spawn/__pycache__/test_misc.cpython-313.pyc,,\nmultiprocess/tests/test_multiprocessing_spawn/__pycache__/test_processes.cpython-313.pyc,,\nmultiprocess/tests/test_multiprocessing_spawn/__pycache__/test_threads.cpython-313.pyc,,\nmultiprocess/tests/test_multiprocessing_spawn/test_manager.py,sha256=3zMuPfUzqSaRZbNbVpQoU9OU52KaJotd7ps0H-_hdFU,195\nmultiprocess/tests/test_multiprocessing_spawn/test_misc.py,sha256=Lb8nqsQL893F8TsvjBEh5s7nV37qAmetcLGk5tuZ3Kc,194\nmultiprocess/tests/test_multiprocessing_spawn/test_processes.py,sha256=HoZZlby6gRZONFoV5-wEC50lQFEawvwT_Jh6yZ4SCag,197\nmultiprocess/tests/test_multiprocessing_spawn/test_threads.py,sha256=pr9-K0KR8THDzYfGw4PGJ45RcGqREcTNX0OAE14exEw,195\nmultiprocess/util.py,sha256=zUGS2s1PyQSS90vLctLRC7QzegW56br_JDkLR7pDAFQ,14060\n
path: .venv\Lib\site-packages\multiprocess-0.70.16.dist-info\RECORD
filename: RECORD | language: Other | size_bytes: 8,044 | quality_score: 0.7 | complexity: 0 | documentation_ratio: 0 | repository: node-utils | stars: 254 | created_date: 2023-08-04T12:19:33.612753 | license: BSD-3-Clause | is_test: false | file_hash: 43ab95f798378adb15e938a49bffdd11
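RECORD, shown above, is the wheel's installed-files manifest: each line is a CSV triple of path, sha256=<urlsafe base64 digest with padding stripped>, and size in bytes; entries such as the *.pyc files carry empty hash and size fields. A minimal verification sketch under those assumptions, run from inside site-packages:

```python
# Minimal sketch: verify RECORD entries of the form "path,sha256=...,size".
# Wheel RECORD hashes are urlsafe-base64 SHA-256 digests, '=' padding stripped.
import base64
import csv
import hashlib
from pathlib import Path

record = Path("multiprocess-0.70.16.dist-info/RECORD")
for path, hash_spec, size in csv.reader(record.open()):
    if not hash_spec:  # .pyc entries and RECORD itself carry no hash
        continue
    algo, _, expected = hash_spec.partition("=")
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.new(algo, data).digest())
    actual = digest.rstrip(b"=").decode()
    status = "OK" if actual == expected and int(size) == len(data) else "MISMATCH"
    print(f"{status} {path}")
```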
_multiprocess\nmultiprocess\n
path: .venv\Lib\site-packages\multiprocess-0.70.16.dist-info\top_level.txt
filename: top_level.txt | language: Other | size_bytes: 27 | quality_score: 0.5 | complexity: 0 | documentation_ratio: 0 | repository: node-utils | stars: 847 | created_date: 2024-04-07T09:41:02.573404 | license: BSD-3-Clause | is_test: false | file_hash: de3a7c5f7b5f3d0dd54e62267f15c9e1
Wheel-Version: 1.0\nGenerator: bdist_wheel (0.38.4)\nRoot-Is-Purelib: true\nTag: py312-none-any\n\n
path: .venv\Lib\site-packages\multiprocess-0.70.16.dist-info\WHEEL
filename: WHEEL | language: Other | size_bytes: 94 | quality_score: 0.5 | complexity: 0 | documentation_ratio: 0 | repository: react-lib | stars: 759 | created_date: 2023-07-14T07:02:16.110639 | license: Apache-2.0 | is_test: false | file_hash: 20bde460bcdb71a2d4551e7b9413007d
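WHEEL (like METADATA) uses RFC 822 style "Key: Value" headers, so the stdlib email parser reads it directly. A minimal sketch, assuming the dist-info path above:

```python
# Minimal sketch: WHEEL (and METADATA) use email-header syntax,
# so the stdlib email parser handles them without a custom parser.
from email.parser import Parser
from pathlib import Path

text = Path("multiprocess-0.70.16.dist-info/WHEEL").read_text()
headers = Parser().parsestr(text)
print(headers["Wheel-Version"])    # 1.0
print(headers["Root-Is-Purelib"])  # true
print(headers["Tag"])              # py312-none-any
```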
# pandas / Polars / etc. : if a user passes a dataframe from one of these\n# libraries, it means they must already have imported the given module.\n# So, we can just check sys.modules.\nfrom __future__ import annotations\n\nimport sys\nfrom typing import TYPE_CHECKING, Any\n\nif TYPE_CHECKING:\n import cudf\n import dask.dataframe as dd\n import duckdb\n import ibis\n import modin.pandas as mpd\n import pandas as pd\n import polars as pl\n import pyarrow as pa\n import pyspark.sql as pyspark_sql\n from pyspark.sql.connect.dataframe import DataFrame as PySparkConnectDataFrame\n from typing_extensions import TypeGuard, TypeIs\n\n from narwhals._spark_like.dataframe import SQLFrameDataFrame\n from narwhals.dataframe import DataFrame, LazyFrame\n from narwhals.series import Series\n from narwhals.typing import (\n IntoDataFrameT,\n IntoLazyFrameT,\n IntoSeriesT,\n _1DArray,\n _1DArrayInt,\n _2DArray,\n _NDArray,\n _NumpyScalar,\n _ShapeT,\n )\n\n\n# We silently allow these but - given that they claim\n# to be drop-in replacements for pandas - testing is\n# their responsibility.\nIMPORT_HOOKS = frozenset(["fireducks"])\n\n\ndef get_polars() -> Any:\n """Get Polars module (if already imported - else return None)."""\n return sys.modules.get("polars", None)\n\n\ndef get_pandas() -> Any:\n """Get pandas module (if already imported - else return None)."""\n return sys.modules.get("pandas", None)\n\n\ndef get_modin() -> Any: # pragma: no cover\n """Get modin.pandas module (if already imported - else return None)."""\n if (modin := sys.modules.get("modin", None)) is not None:\n return modin.pandas\n return None\n\n\ndef get_cudf() -> Any:\n """Get cudf module (if already imported - else return None)."""\n return sys.modules.get("cudf", None)\n\n\ndef get_cupy() -> Any:\n """Get cupy module (if already imported - else return None)."""\n return sys.modules.get("cupy", None)\n\n\ndef get_pyarrow() -> Any: # pragma: no cover\n """Get pyarrow module (if already imported - else return None)."""\n return sys.modules.get("pyarrow", None)\n\n\ndef get_numpy() -> Any:\n """Get numpy module (if already imported - else return None)."""\n return sys.modules.get("numpy", None)\n\n\ndef get_dask() -> Any:\n """Get dask (if already imported - else return None)."""\n return sys.modules.get("dask", None)\n\n\ndef get_dask_dataframe() -> Any:\n """Get dask.dataframe module (if already imported - else return None)."""\n return sys.modules.get("dask.dataframe", None)\n\n\ndef get_duckdb() -> Any:\n """Get duckdb module (if already imported - else return None)."""\n return sys.modules.get("duckdb", None)\n\n\ndef get_ibis() -> Any:\n """Get ibis module (if already imported - else return None)."""\n return sys.modules.get("ibis", None)\n\n\ndef get_dask_expr() -> Any: # pragma: no cover\n """Get dask_expr module (if already imported - else return None)."""\n if (dd := get_dask_dataframe()) is not None and hasattr(dd, "dask_expr"):\n return dd.dask_expr\n return sys.modules.get("dask_expr", None)\n\n\ndef get_pyspark() -> Any: # pragma: no cover\n """Get pyspark module (if already imported - else return None)."""\n return sys.modules.get("pyspark", None)\n\n\ndef get_pyspark_sql() -> Any:\n """Get pyspark.sql module (if already imported - else return None)."""\n return sys.modules.get("pyspark.sql", None)\n\n\ndef get_pyspark_connect() -> Any:\n """Get pyspark.sql.connect module (if already imported - else return None)."""\n return sys.modules.get("pyspark.sql.connect", None)\n\n\ndef get_sqlframe() -> Any:\n """Get 
sqlframe module (if already imported - else return None)."""\n return sys.modules.get("sqlframe", None)\n\n\ndef _raise_if_narwhals_df_or_lf(df: Any) -> None:\n if is_narwhals_dataframe(df) or is_narwhals_lazyframe(df):\n msg = (\n f"You passed a `{type(df)}` to `is_pandas_dataframe`.\n\n"\n "Hint: Instead of e.g. `is_pandas_dataframe(df)`, "\n "did you mean `is_pandas_dataframe(df.to_native())`?"\n )\n raise TypeError(msg)\n\n\ndef _raise_if_narwhals_series(ser: Any) -> None:\n if is_narwhals_series(ser):\n msg = (\n f"You passed a `{type(ser)}` to `is_pandas_series`.\n\n"\n "Hint: Instead of e.g. `is_pandas_series(ser)`, "\n "did you mean `is_pandas_series(ser.to_native())`?"\n )\n raise TypeError(msg)\n\n\ndef is_pandas_dataframe(df: Any) -> TypeIs[pd.DataFrame]:\n """Check whether `df` is a pandas DataFrame without importing pandas.\n\n Warning:\n This method cannot be called on a Narwhals DataFrame/LazyFrame.\n """\n _raise_if_narwhals_df_or_lf(df)\n return ((pd := get_pandas()) is not None and isinstance(df, pd.DataFrame)) or any(\n (mod := sys.modules.get(module_name, None)) is not None\n and isinstance(df, mod.pandas.DataFrame)\n for module_name in IMPORT_HOOKS\n )\n\n\ndef is_pandas_series(ser: Any) -> TypeIs[pd.Series[Any]]:\n """Check whether `ser` is a pandas Series without importing pandas.\n\n Warning:\n This method cannot be called on Narwhals Series.\n """\n _raise_if_narwhals_series(ser)\n return ((pd := get_pandas()) is not None and isinstance(ser, pd.Series)) or any(\n (mod := sys.modules.get(module_name, None)) is not None\n and isinstance(ser, mod.pandas.Series)\n for module_name in IMPORT_HOOKS\n )\n\n\ndef is_pandas_index(index: Any) -> TypeIs[pd.Index[Any]]:\n """Check whether `index` is a pandas Index without importing pandas."""\n return ((pd := get_pandas()) is not None and isinstance(index, pd.Index)) or any(\n (mod := sys.modules.get(module_name, None)) is not None\n and isinstance(index, mod.pandas.Index)\n for module_name in IMPORT_HOOKS\n )\n\n\ndef is_modin_dataframe(df: Any) -> TypeIs[mpd.DataFrame]:\n """Check whether `df` is a modin DataFrame without importing modin.\n\n Warning:\n This method cannot be called on a Narwhals DataFrame/LazyFrame.\n """\n _raise_if_narwhals_df_or_lf(df)\n return (mpd := get_modin()) is not None and isinstance(df, mpd.DataFrame)\n\n\ndef is_modin_series(ser: Any) -> TypeIs[mpd.Series]:\n """Check whether `ser` is a modin Series without importing modin.\n\n Warning:\n This method cannot be called on Narwhals Series.\n """\n _raise_if_narwhals_series(ser)\n return (mpd := get_modin()) is not None and isinstance(ser, mpd.Series)\n\n\ndef is_modin_index(index: Any) -> TypeIs[mpd.Index[Any]]: # pragma: no cover\n """Check whether `index` is a modin Index without importing modin."""\n return (mpd := get_modin()) is not None and isinstance(index, mpd.Index)\n\n\ndef is_cudf_dataframe(df: Any) -> TypeIs[cudf.DataFrame]:\n """Check whether `df` is a cudf DataFrame without importing cudf.\n\n Warning:\n This method cannot be called on a Narwhals DataFrame/LazyFrame.\n """\n _raise_if_narwhals_df_or_lf(df)\n return (cudf := get_cudf()) is not None and isinstance(df, cudf.DataFrame)\n\n\ndef is_cudf_series(ser: Any) -> TypeIs[cudf.Series[Any]]:\n """Check whether `ser` is a cudf Series without importing cudf.\n\n Warning:\n This method cannot be called on Narwhals Series.\n """\n _raise_if_narwhals_series(ser)\n return (cudf := get_cudf()) is not None and isinstance(ser, cudf.Series)\n\n\ndef is_cudf_index(index: Any) -> 
TypeIs[cudf.Index]:\n """Check whether `index` is a cudf Index without importing cudf."""\n return (cudf := get_cudf()) is not None and isinstance(\n index, cudf.Index\n ) # pragma: no cover\n\n\ndef is_cupy_scalar(obj: Any) -> bool:\n return (\n (cupy := get_cupy()) is not None\n and isinstance(obj, cupy.ndarray)\n and obj.size == 1\n ) # pragma: no cover\n\n\ndef is_dask_dataframe(df: Any) -> TypeIs[dd.DataFrame]:\n """Check whether `df` is a Dask DataFrame without importing Dask.\n\n Warning:\n This method cannot be called on a Narwhals DataFrame/LazyFrame.\n """\n _raise_if_narwhals_df_or_lf(df)\n return (dd := get_dask_dataframe()) is not None and isinstance(df, dd.DataFrame)\n\n\ndef is_duckdb_relation(df: Any) -> TypeIs[duckdb.DuckDBPyRelation]:\n """Check whether `df` is a DuckDB Relation without importing DuckDB.\n\n Warning:\n This method cannot be called on Narwhals DataFrame/LazyFrame.\n """\n _raise_if_narwhals_df_or_lf(df)\n return (duckdb := get_duckdb()) is not None and isinstance(\n df, duckdb.DuckDBPyRelation\n )\n\n\ndef is_ibis_table(df: Any) -> TypeIs[ibis.Table]:\n """Check whether `df` is a Ibis Table without importing Ibis.\n\n Warning:\n This method cannot be called on Narwhals DataFrame/LazyFrame.\n """\n _raise_if_narwhals_df_or_lf(df)\n return (ibis := get_ibis()) is not None and isinstance(df, ibis.expr.types.Table)\n\n\ndef is_polars_dataframe(df: Any) -> TypeIs[pl.DataFrame]:\n """Check whether `df` is a Polars DataFrame without importing Polars.\n\n Warning:\n This method cannot be called on a Narwhals DataFrame/LazyFrame.\n """\n _raise_if_narwhals_df_or_lf(df)\n return (pl := get_polars()) is not None and isinstance(df, pl.DataFrame)\n\n\ndef is_polars_lazyframe(df: Any) -> TypeIs[pl.LazyFrame]:\n """Check whether `df` is a Polars LazyFrame without importing Polars.\n\n Warning:\n This method cannot be called on Narwhals DataFrame/LazyFrame.\n """\n _raise_if_narwhals_df_or_lf(df)\n return (pl := get_polars()) is not None and isinstance(df, pl.LazyFrame)\n\n\ndef is_polars_series(ser: Any) -> TypeIs[pl.Series]:\n """Check whether `ser` is a Polars Series without importing Polars.\n\n Warning:\n This method cannot be called on Narwhals Series.\n """\n _raise_if_narwhals_series(ser)\n return (pl := get_polars()) is not None and isinstance(ser, pl.Series)\n\n\ndef is_pyarrow_chunked_array(ser: Any) -> TypeIs[pa.ChunkedArray[Any]]:\n """Check whether `ser` is a PyArrow ChunkedArray without importing PyArrow.\n\n Warning:\n This method cannot be called on Narwhals Series.\n """\n _raise_if_narwhals_series(ser)\n return (pa := get_pyarrow()) is not None and isinstance(ser, pa.ChunkedArray)\n\n\ndef is_pyarrow_table(df: Any) -> TypeIs[pa.Table]:\n """Check whether `df` is a PyArrow Table without importing PyArrow.\n\n Warning:\n This method cannot be called on Narwhals DataFrame/LazyFrame.\n """\n _raise_if_narwhals_df_or_lf(df)\n return (pa := get_pyarrow()) is not None and isinstance(df, pa.Table)\n\n\ndef is_pyarrow_scalar(obj: Any) -> TypeIs[pa.Scalar[Any]]:\n return (pa := get_pyarrow()) is not None and isinstance(obj, pa.Scalar)\n\n\ndef is_pyspark_dataframe(df: Any) -> TypeIs[pyspark_sql.DataFrame]:\n """Check whether `df` is a PySpark DataFrame without importing PySpark.\n\n Warning:\n This method cannot be called on a Narwhals DataFrame/LazyFrame.\n """\n _raise_if_narwhals_df_or_lf(df)\n return bool(\n (pyspark_sql := get_pyspark_sql()) is not None\n and isinstance(df, pyspark_sql.DataFrame)\n )\n\n\ndef is_pyspark_connect_dataframe(df: Any) -> 
TypeIs[PySparkConnectDataFrame]:\n """Check whether `df` is a PySpark Connect DataFrame without importing PySpark.\n\n Warning:\n This method cannot be called on a Narwhals DataFrame/LazyFrame.\n """\n _raise_if_narwhals_df_or_lf(df)\n if get_pyspark_connect() is not None: # pragma: no cover\n try:\n from pyspark.sql.connect.dataframe import DataFrame\n except ImportError:\n return False\n return isinstance(df, DataFrame)\n return False\n\n\ndef is_sqlframe_dataframe(df: Any) -> TypeIs[SQLFrameDataFrame]:\n """Check whether `df` is a SQLFrame DataFrame without importing SQLFrame.\n\n Warning:\n This method cannot be called on a Narwhals DataFrame/LazyFrame.\n """\n _raise_if_narwhals_df_or_lf(df)\n if get_sqlframe() is not None:\n from sqlframe.base.dataframe import BaseDataFrame\n\n return isinstance(df, BaseDataFrame)\n return False # pragma: no cover\n\n\ndef is_numpy_array(arr: Any | _NDArray[_ShapeT]) -> TypeIs[_NDArray[_ShapeT]]:\n """Check whether `arr` is a NumPy Array without importing NumPy."""\n return (np := get_numpy()) is not None and isinstance(arr, np.ndarray)\n\n\ndef is_numpy_array_1d(arr: Any) -> TypeIs[_1DArray]:\n """Check whether `arr` is a 1D NumPy Array without importing NumPy."""\n return is_numpy_array(arr) and arr.ndim == 1\n\n\ndef is_numpy_array_1d_int(arr: Any) -> TypeIs[_1DArrayInt]:\n return (\n (np := get_numpy())\n and is_numpy_array_1d(arr)\n and np.issubdtype(arr.dtype, np.integer)\n )\n\n\ndef is_numpy_array_2d(arr: Any) -> TypeIs[_2DArray]:\n """Check whether `arr` is a 2D NumPy Array without importing NumPy."""\n return is_numpy_array(arr) and arr.ndim == 2\n\n\ndef is_numpy_scalar(scalar: Any) -> TypeGuard[_NumpyScalar]:\n """Check whether `scalar` is a NumPy Scalar without importing NumPy."""\n # NOTE: Needs to stay as `TypeGuard`\n # - Used in `Series.__getitem__`, but not annotated\n # - `TypeGuard` is *hiding* that the check introduces an intersection\n return (np := get_numpy()) is not None and isinstance(scalar, np.generic)\n\n\ndef is_pandas_like_dataframe(df: Any) -> bool:\n """Check whether `df` is a pandas-like DataFrame without doing any imports.\n\n By "pandas-like", we mean: pandas, Modin, cuDF.\n\n Warning:\n This method cannot be called on a Narwhals DataFrame/LazyFrame.\n """\n _raise_if_narwhals_df_or_lf(df)\n return is_pandas_dataframe(df) or is_modin_dataframe(df) or is_cudf_dataframe(df)\n\n\ndef is_pandas_like_series(ser: Any) -> bool:\n """Check whether `ser` is a pandas-like Series without doing any imports.\n\n By "pandas-like", we mean: pandas, Modin, cuDF.\n\n Warning:\n This method cannot be called on Narwhals Series.\n """\n _raise_if_narwhals_series(ser)\n return is_pandas_series(ser) or is_modin_series(ser) or is_cudf_series(ser)\n\n\ndef is_pandas_like_index(index: Any) -> bool:\n """Check whether `index` is a pandas-like Index without doing any imports.\n\n By "pandas-like", we mean: pandas, Modin, cuDF.\n """\n return (\n is_pandas_index(index) or is_modin_index(index) or is_cudf_index(index)\n ) # pragma: no cover\n\n\ndef is_into_series(native_series: Any | IntoSeriesT) -> TypeIs[IntoSeriesT]:\n """Check whether `native_series` can be converted to a Narwhals Series.\n\n Arguments:\n native_series: The object to check.\n\n Returns:\n `True` if `native_series` can be converted to a Narwhals Series, `False` otherwise.\n\n Examples:\n >>> import pandas as pd\n >>> import polars as pl\n >>> import numpy as np\n >>> import narwhals as nw\n\n >>> s_pd = pd.Series([1, 2, 3])\n >>> s_pl = pl.Series([1, 2, 3])\n >>> np_arr = 
np.array([1, 2, 3])\n\n >>> nw.dependencies.is_into_series(s_pd)\n True\n >>> nw.dependencies.is_into_series(s_pl)\n True\n >>> nw.dependencies.is_into_series(np_arr)\n False\n """\n from narwhals.series import Series\n\n return (\n isinstance(native_series, Series)\n or hasattr(native_series, "__narwhals_series__")\n or is_polars_series(native_series)\n or is_pyarrow_chunked_array(native_series)\n or is_pandas_like_series(native_series)\n )\n\n\ndef is_into_dataframe(native_dataframe: Any | IntoDataFrameT) -> TypeIs[IntoDataFrameT]:\n """Check whether `native_dataframe` can be converted to a Narwhals DataFrame.\n\n Arguments:\n native_dataframe: The object to check.\n\n Returns:\n `True` if `native_dataframe` can be converted to a Narwhals DataFrame, `False` otherwise.\n\n Examples:\n >>> import pandas as pd\n >>> import polars as pl\n >>> import numpy as np\n >>> from narwhals.dependencies import is_into_dataframe\n\n >>> df_pd = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n >>> df_pl = pl.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n >>> np_arr = np.array([[1, 4], [2, 5], [3, 6]])\n\n >>> is_into_dataframe(df_pd)\n True\n >>> is_into_dataframe(df_pl)\n True\n >>> is_into_dataframe(np_arr)\n False\n """\n from narwhals.dataframe import DataFrame\n\n return (\n isinstance(native_dataframe, DataFrame)\n or hasattr(native_dataframe, "__narwhals_dataframe__")\n or is_polars_dataframe(native_dataframe)\n or is_pyarrow_table(native_dataframe)\n or is_pandas_like_dataframe(native_dataframe)\n )\n\n\ndef is_narwhals_dataframe(\n df: DataFrame[IntoDataFrameT] | Any,\n) -> TypeIs[DataFrame[IntoDataFrameT]]:\n """Check whether `df` is a Narwhals DataFrame.\n\n This is useful if you expect a user to pass in a Narwhals\n DataFrame directly, and you want to catch both `narwhals.DataFrame`\n and `narwhals.stable.v1.DataFrame`.\n """\n from narwhals.dataframe import DataFrame\n\n return isinstance(df, DataFrame)\n\n\ndef is_narwhals_lazyframe(\n lf: Any | LazyFrame[IntoLazyFrameT],\n) -> TypeIs[LazyFrame[IntoLazyFrameT]]:\n """Check whether `lf` is a Narwhals LazyFrame.\n\n This is useful if you expect a user to pass in a Narwhals\n LazyFrame directly, and you want to catch both `narwhals.LazyFrame`\n and `narwhals.stable.v1.LazyFrame`.\n """\n from narwhals.dataframe import LazyFrame\n\n return isinstance(lf, LazyFrame)\n\n\ndef is_narwhals_series(ser: Any | Series[IntoSeriesT]) -> TypeIs[Series[IntoSeriesT]]:\n """Check whether `ser` is a Narwhals Series.\n\n This is useful if you expect a user to pass in a Narwhals\n Series directly, and you want to catch both `narwhals.Series`\n and `narwhals.stable.v1.Series`.\n """\n from narwhals.series import Series\n\n return isinstance(ser, Series)\n\n\ndef is_narwhals_series_int(ser: Any | Series[IntoSeriesT]) -> TypeIs[Series[IntoSeriesT]]:\n return is_narwhals_series(ser) and ser.dtype.is_integer()\n\n\n__all__ = [\n "get_cudf",\n "get_ibis",\n "get_modin",\n "get_numpy",\n "get_pandas",\n "get_polars",\n "get_pyarrow",\n "is_cudf_dataframe",\n "is_cudf_series",\n "is_dask_dataframe",\n "is_ibis_table",\n "is_into_dataframe",\n "is_into_series",\n "is_modin_dataframe",\n "is_modin_series",\n "is_narwhals_dataframe",\n "is_narwhals_lazyframe",\n "is_narwhals_series",\n "is_numpy_array",\n "is_pandas_dataframe",\n "is_pandas_index",\n "is_pandas_like_dataframe",\n "is_pandas_like_series",\n "is_pandas_series",\n "is_polars_dataframe",\n "is_polars_lazyframe",\n "is_polars_series",\n "is_pyarrow_chunked_array",\n "is_pyarrow_table",\n]\n
path: .venv\Lib\site-packages\narwhals\dependencies.py
filename: dependencies.py | language: Python | size_bytes: 18,697 | quality_score: 0.95 | complexity: 0.155026 | documentation_ratio: 0.020882 | repository: react-lib | stars: 323 | created_date: 2025-06-19T18:49:57.129406 | license: GPL-3.0 | is_test: false | file_hash: 9c2fe802038da283c9ffaae77ccada0e
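dependencies.py above builds every check on one pattern: never import the heavy library, only look it up in sys.modules, since an object from e.g. Polars can only exist if the user already imported polars. A standalone sketch of that pattern, mirroring the names above:

```python
# Minimal sketch of the sys.modules pattern used throughout dependencies.py:
# detect a library's objects without ever importing the library yourself.
import sys
from typing import Any


def get_polars() -> Any:
    """Return the polars module if the *user* already imported it, else None."""
    return sys.modules.get("polars", None)


def is_polars_dataframe(obj: Any) -> bool:
    # If polars was never imported, obj cannot be a polars.DataFrame,
    # and the (slow) import is avoided entirely.
    return (pl := get_polars()) is not None and isinstance(obj, pl.DataFrame)


print(is_polars_dataframe([1, 2, 3]))  # False, and polars is never imported
```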
from __future__ import annotations\n\nimport enum\nfrom collections import OrderedDict\nfrom collections.abc import Iterable, Mapping\nfrom datetime import timezone\nfrom itertools import starmap\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._utils import _DeferredIterable, isinstance_or_issubclass\n\nif TYPE_CHECKING:\n from collections.abc import Iterator, Sequence\n\n from typing_extensions import Self\n\n from narwhals.typing import IntoDType, TimeUnit\n\n\ndef _validate_dtype(dtype: DType | type[DType]) -> None:\n if not isinstance_or_issubclass(dtype, DType):\n msg = (\n f"Expected Narwhals dtype, got: {type(dtype)}.\n\n"\n "Hint: if you were trying to cast to a type, use e.g. nw.Int64 instead of 'int64'."\n )\n raise TypeError(msg)\n\n\nclass DType:\n def __repr__(self) -> str: # pragma: no cover\n return self.__class__.__qualname__\n\n @classmethod\n def is_numeric(cls: type[Self]) -> bool:\n return issubclass(cls, NumericType)\n\n @classmethod\n def is_integer(cls: type[Self]) -> bool:\n return issubclass(cls, IntegerType)\n\n @classmethod\n def is_signed_integer(cls: type[Self]) -> bool:\n return issubclass(cls, SignedIntegerType)\n\n @classmethod\n def is_unsigned_integer(cls: type[Self]) -> bool:\n return issubclass(cls, UnsignedIntegerType)\n\n @classmethod\n def is_float(cls: type[Self]) -> bool:\n return issubclass(cls, FloatType)\n\n @classmethod\n def is_decimal(cls: type[Self]) -> bool:\n return issubclass(cls, Decimal)\n\n @classmethod\n def is_temporal(cls: type[Self]) -> bool:\n return issubclass(cls, TemporalType)\n\n @classmethod\n def is_nested(cls: type[Self]) -> bool:\n return issubclass(cls, NestedType)\n\n def __eq__(self, other: DType | type[DType]) -> bool: # type: ignore[override]\n from narwhals._utils import isinstance_or_issubclass\n\n return isinstance_or_issubclass(other, type(self))\n\n def __hash__(self) -> int:\n return hash(self.__class__)\n\n\nclass NumericType(DType):\n """Base class for numeric data types."""\n\n\nclass IntegerType(NumericType):\n """Base class for integer data types."""\n\n\nclass SignedIntegerType(IntegerType):\n """Base class for signed integer data types."""\n\n\nclass UnsignedIntegerType(IntegerType):\n """Base class for unsigned integer data types."""\n\n\nclass FloatType(NumericType):\n """Base class for float data types."""\n\n\nclass TemporalType(DType):\n """Base class for temporal data types."""\n\n\nclass NestedType(DType):\n """Base class for nested data types."""\n\n\nclass Decimal(NumericType):\n """Decimal type.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s = pl.Series(["1.5"], dtype=pl.Decimal)\n >>> nw.from_native(s, series_only=True).dtype\n Decimal\n """\n\n\nclass Int128(SignedIntegerType):\n """128-bit signed integer type.\n\n Examples:\n >>> import polars as pl\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> import duckdb\n >>> s_native = pl.Series([2, 1, 3, 7])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> df_native = pa.table({"a": [2, 1, 3, 7]})\n >>> rel = duckdb.sql(" SELECT CAST (a AS INT128) AS a FROM df_native ")\n\n >>> s.cast(nw.Int128).dtype\n Int128\n >>> nw.from_native(rel).schema["a"]\n Int128\n """\n\n\nclass Int64(SignedIntegerType):\n """64-bit signed integer type.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series([2, 1, 3, 7])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.cast(nw.Int64).dtype\n Int64\n """\n\n\nclass Int32(SignedIntegerType):\n """32-bit signed integer 
type.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pa.chunked_array([[2, 1, 3, 7]])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.cast(nw.Int32).dtype\n Int32\n """\n\n\nclass Int16(SignedIntegerType):\n """16-bit signed integer type.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series([2, 1, 3, 7])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.cast(nw.Int16).dtype\n Int16\n """\n\n\nclass Int8(SignedIntegerType):\n """8-bit signed integer type.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series([2, 1, 3, 7])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.cast(nw.Int8).dtype\n Int8\n """\n\n\nclass UInt128(UnsignedIntegerType):\n """128-bit unsigned integer type.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> import duckdb\n >>> df_native = pd.DataFrame({"a": [2, 1, 3, 7]})\n >>> rel = duckdb.sql(" SELECT CAST (a AS UINT128) AS a FROM df_native ")\n >>> nw.from_native(rel).schema["a"]\n UInt128\n """\n\n\nclass UInt64(UnsignedIntegerType):\n """64-bit unsigned integer type.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series([2, 1, 3, 7])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.cast(nw.UInt64).dtype\n UInt64\n """\n\n\nclass UInt32(UnsignedIntegerType):\n """32-bit unsigned integer type.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series([2, 1, 3, 7])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.cast(nw.UInt32).dtype\n UInt32\n """\n\n\nclass UInt16(UnsignedIntegerType):\n """16-bit unsigned integer type.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series([2, 1, 3, 7])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.cast(nw.UInt16).dtype\n UInt16\n """\n\n\nclass UInt8(UnsignedIntegerType):\n """8-bit unsigned integer type.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series([2, 1, 3, 7])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.cast(nw.UInt8).dtype\n UInt8\n """\n\n\nclass Float64(FloatType):\n """64-bit floating point type.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pa.chunked_array([[0.001, 0.1, 0.01, 0.1]])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.cast(nw.Float64).dtype\n Float64\n """\n\n\nclass Float32(FloatType):\n """32-bit floating point type.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series([0.001, 0.1, 0.01, 0.1])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.cast(nw.Float32).dtype\n Float32\n """\n\n\nclass String(DType):\n """UTF-8 encoded string type.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(["beluga", "narwhal", "orca", "vaquita"])\n >>> nw.from_native(s_native, series_only=True).dtype\n String\n """\n\n\nclass Boolean(DType):\n """Boolean type.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pa.chunked_array([[True, False, False, True]])\n >>> nw.from_native(s_native, series_only=True).dtype\n Boolean\n """\n\n\nclass Object(DType):\n """Data type for wrapping arbitrary Python objects.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> class Foo: ...\n >>> s_native = pd.Series([Foo(), Foo()])\n >>> 
nw.from_native(s_native, series_only=True).dtype\n Object\n """\n\n\nclass Unknown(DType):\n """Type representing DataType values that could not be determined statically.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(pd.period_range("2000-01", periods=4, freq="M"))\n >>> nw.from_native(s_native, series_only=True).dtype\n Unknown\n """\n\n\nclass _DatetimeMeta(type):\n @property\n def time_unit(cls) -> TimeUnit:\n return "us"\n\n @property\n def time_zone(cls) -> str | None:\n return None\n\n\nclass Datetime(TemporalType, metaclass=_DatetimeMeta):\n """Data type representing a calendar date and time of day.\n\n Arguments:\n time_unit: Unit of time. Defaults to `'us'` (microseconds).\n time_zone: Time zone string, as defined in zoneinfo (to see valid strings run\n `import zoneinfo; zoneinfo.available_timezones()` for a full list).\n\n Notes:\n Adapted from [Polars implementation](https://github.com/pola-rs/polars/blob/py-1.7.1/py-polars/polars/datatypes/classes.py#L398-L457)\n\n Examples:\n >>> from datetime import datetime, timedelta\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = (\n ... pl.Series([datetime(2024, 12, 9) + timedelta(days=n) for n in range(5)])\n ... .cast(pl.Datetime("ms"))\n ... .dt.replace_time_zone("Africa/Accra")\n ... )\n >>> nw.from_native(s_native, series_only=True).dtype\n Datetime(time_unit='ms', time_zone='Africa/Accra')\n """\n\n def __init__(\n self, time_unit: TimeUnit = "us", time_zone: str | timezone | None = None\n ) -> None:\n if time_unit not in {"s", "ms", "us", "ns"}:\n msg = (\n "invalid `time_unit`"\n f"\n\nExpected one of {{'ns','us','ms', 's'}}, got {time_unit!r}."\n )\n raise ValueError(msg)\n\n if isinstance(time_zone, timezone):\n time_zone = str(time_zone)\n\n self.time_unit: TimeUnit = time_unit\n self.time_zone: str | None = time_zone\n\n def __eq__(self, other: object) -> bool:\n # allow comparing object instances to class\n if type(other) is _DatetimeMeta:\n return True\n elif isinstance(other, self.__class__):\n return self.time_unit == other.time_unit and self.time_zone == other.time_zone\n else: # pragma: no cover\n return False\n\n def __hash__(self) -> int: # pragma: no cover\n return hash((self.__class__, self.time_unit, self.time_zone))\n\n def __repr__(self) -> str: # pragma: no cover\n class_name = self.__class__.__name__\n return f"{class_name}(time_unit={self.time_unit!r}, time_zone={self.time_zone!r})"\n\n\nclass _DurationMeta(type):\n @property\n def time_unit(cls) -> TimeUnit:\n return "us"\n\n\nclass Duration(TemporalType, metaclass=_DurationMeta):\n """Data type representing a time duration.\n\n Arguments:\n time_unit: Unit of time. Defaults to `'us'` (microseconds).\n\n Notes:\n Adapted from [Polars implementation](https://github.com/pola-rs/polars/blob/py-1.7.1/py-polars/polars/datatypes/classes.py#L460-L502)\n\n Examples:\n >>> from datetime import timedelta\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pa.chunked_array(\n ... [[timedelta(seconds=d) for d in range(1, 4)]], type=pa.duration("ms")\n ... 
)\n >>> nw.from_native(s_native, series_only=True).dtype\n Duration(time_unit='ms')\n """\n\n def __init__(self, time_unit: TimeUnit = "us") -> None:\n if time_unit not in {"s", "ms", "us", "ns"}:\n msg = (\n "invalid `time_unit`"\n f"\n\nExpected one of {{'ns','us','ms', 's'}}, got {time_unit!r}."\n )\n raise ValueError(msg)\n\n self.time_unit: TimeUnit = time_unit\n\n def __eq__(self, other: object) -> bool:\n # allow comparing object instances to class\n if type(other) is _DurationMeta:\n return True\n elif isinstance(other, self.__class__):\n return self.time_unit == other.time_unit\n else: # pragma: no cover\n return False\n\n def __hash__(self) -> int: # pragma: no cover\n return hash((self.__class__, self.time_unit))\n\n def __repr__(self) -> str: # pragma: no cover\n class_name = self.__class__.__name__\n return f"{class_name}(time_unit={self.time_unit!r})"\n\n\nclass Categorical(DType):\n """A categorical encoding of a set of strings.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series(["beluga", "narwhal", "orca"])\n >>> nw.from_native(s_native, series_only=True).cast(nw.Categorical).dtype\n Categorical\n """\n\n\nclass Enum(DType):\n """A fixed categorical encoding of a unique set of strings.\n\n Polars has an Enum data type. In pandas, ordered categories get mapped\n to Enum. PyArrow has no Enum equivalent.\n\n Examples:\n >>> import narwhals as nw\n >>> nw.Enum(["beluga", "narwhal", "orca"])\n Enum(categories=['beluga', 'narwhal', 'orca'])\n """\n\n def __init__(self, categories: Iterable[str] | type[enum.Enum]) -> None:\n self._delayed_categories: _DeferredIterable[str] | None = None\n self._cached_categories: tuple[str, ...] | None = None\n\n if isinstance(categories, _DeferredIterable):\n self._delayed_categories = categories\n elif isinstance(categories, type) and issubclass(categories, enum.Enum):\n self._cached_categories = tuple(member.value for member in categories)\n else:\n self._cached_categories = tuple(categories)\n\n @property\n def categories(self) -> tuple[str, ...]:\n if cached := self._cached_categories:\n return cached\n elif delayed := self._delayed_categories:\n self._cached_categories = delayed.to_tuple()\n return self._cached_categories\n else: # pragma: no cover\n msg = f"Internal structure of {type(self).__name__!r} is invalid."\n raise TypeError(msg)\n\n def __eq__(self, other: object) -> bool:\n # allow comparing object instances to class\n if type(other) is type:\n return other is Enum\n return isinstance(other, type(self)) and self.categories == other.categories\n\n def __hash__(self) -> int:\n return hash((self.__class__, tuple(self.categories)))\n\n def __repr__(self) -> str:\n return f"{type(self).__name__}(categories={list(self.categories)!r})"\n\n\nclass Field:\n """Definition of a single field within a `Struct` DataType.\n\n Arguments:\n name: The name of the field within its parent `Struct`.\n dtype: The `DataType` of the field's values.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> data = [{"a": 1, "b": ["narwhal", "beluga"]}, {"a": 2, "b": ["orca"]}]\n >>> ser_pa = pa.chunked_array([data])\n >>> nw.from_native(ser_pa, series_only=True).dtype.fields\n [Field('a', Int64), Field('b', List(String))]\n """\n\n name: str\n dtype: IntoDType\n\n def __init__(self, name: str, dtype: IntoDType) -> None:\n self.name = name\n self.dtype = dtype\n\n def __eq__(self, other: Field) -> bool: # type: ignore[override]\n return (self.name == other.name) & (self.dtype == other.dtype)\n\n 
def __hash__(self) -> int:\n return hash((self.name, self.dtype))\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n return f"{class_name}({self.name!r}, {self.dtype})"\n\n\nclass Struct(NestedType):\n """Struct composite type.\n\n Arguments:\n fields: The fields that make up the struct. Can be either a sequence of Field\n objects or a mapping of column names to data types.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pa.chunked_array(\n ... [[{"a": 1, "b": ["narwhal", "beluga"]}, {"a": 2, "b": ["orca"]}]]\n ... )\n >>> nw.from_native(s_native, series_only=True).dtype\n Struct({'a': Int64, 'b': List(String)})\n """\n\n fields: list[Field]\n\n def __init__(self, fields: Sequence[Field] | Mapping[str, IntoDType]) -> None:\n if isinstance(fields, Mapping):\n self.fields = list(starmap(Field, fields.items()))\n else:\n self.fields = list(fields)\n\n def __eq__(self, other: DType | type[DType]) -> bool: # type: ignore[override]\n # The comparison allows comparing objects to classes, and specific\n # inner types to those without (eg: inner=None). if one of the\n # arguments is not specific about its inner type we infer it\n # as being equal. (See the List type for more info).\n if type(other) is type and issubclass(other, self.__class__):\n return True\n elif isinstance(other, self.__class__):\n return self.fields == other.fields\n else:\n return False\n\n def __hash__(self) -> int:\n return hash((self.__class__, tuple(self.fields)))\n\n def __iter__(self) -> Iterator[tuple[str, IntoDType]]: # pragma: no cover\n for fld in self.fields:\n yield fld.name, fld.dtype\n\n def __reversed__(self) -> Iterator[tuple[str, IntoDType]]:\n for fld in reversed(self.fields):\n yield fld.name, fld.dtype\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n return f"{class_name}({dict(self)})"\n\n def to_schema(self) -> OrderedDict[str, IntoDType]:\n """Return Struct dtype as a schema dict.\n\n Returns:\n Mapping from column name to dtype.\n """\n return OrderedDict(self)\n\n\nclass List(NestedType):\n """Variable length list type.\n\n Examples:\n >>> import pandas as pd\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pd.Series(\n ... [["narwhal", "orca"], ["beluga", "vaquita"]],\n ... dtype=pd.ArrowDtype(pa.large_list(pa.large_string())),\n ... 
)\n >>> nw.from_native(s_native, series_only=True).dtype\n List(String)\n """\n\n inner: IntoDType\n\n def __init__(self, inner: IntoDType) -> None:\n self.inner = inner\n\n def __eq__(self, other: DType | type[DType]) -> bool: # type: ignore[override]\n # This equality check allows comparison of type classes and type instances.\n # If a parent type is not specific about its inner type, we infer it as equal:\n # > list[i64] == list[i64] -> True\n # > list[i64] == list[f32] -> False\n # > list[i64] == list -> True\n\n # allow comparing object instances to class\n if type(other) is type and issubclass(other, self.__class__):\n return True\n elif isinstance(other, self.__class__):\n return self.inner == other.inner\n else:\n return False\n\n def __hash__(self) -> int:\n return hash((self.__class__, self.inner))\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n return f"{class_name}({self.inner!r})"\n\n\nclass Array(NestedType):\n """Fixed length list type.\n\n Arguments:\n inner: The datatype of the values within each array.\n shape: The shape of the arrays.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series([[1, 2], [3, 4], [5, 6]], dtype=pl.Array(pl.Int32, 2))\n >>> nw.from_native(s_native, series_only=True).dtype\n Array(Int32, shape=(2,))\n """\n\n inner: IntoDType\n size: int\n shape: tuple[int, ...]\n\n def __init__(self, inner: IntoDType, shape: int | tuple[int, ...]) -> None:\n inner_shape: tuple[int, ...] = inner.shape if isinstance(inner, Array) else ()\n if isinstance(shape, int):\n self.inner = inner\n self.size = shape\n self.shape = (shape, *inner_shape)\n\n elif isinstance(shape, tuple) and len(shape) != 0 and isinstance(shape[0], int):\n if len(shape) > 1:\n inner = Array(inner, shape[1:])\n\n self.inner = inner\n self.size = shape[0]\n self.shape = shape + inner_shape\n\n else:\n msg = f"invalid input for shape: {shape!r}"\n raise TypeError(msg)\n\n def __eq__(self, other: DType | type[DType]) -> bool: # type: ignore[override]\n # This equality check allows comparison of type classes and type instances.\n # If a parent type is not specific about its inner type, we infer it as equal:\n # > array[i64] == array[i64] -> True\n # > array[i64] == array[f32] -> False\n # > array[i64] == array -> True\n\n # allow comparing object instances to class\n if type(other) is type and issubclass(other, self.__class__):\n return True\n elif isinstance(other, self.__class__):\n if self.shape != other.shape:\n return False\n else:\n return self.inner == other.inner\n else:\n return False\n\n def __hash__(self) -> int:\n return hash((self.__class__, self.inner, self.shape))\n\n def __repr__(self) -> str:\n # Get leaf type\n dtype_ = self\n for _ in self.shape:\n dtype_ = dtype_.inner # type: ignore[assignment]\n\n class_name = self.__class__.__name__\n return f"{class_name}({dtype_!r}, shape={self.shape})"\n\n\nclass Date(TemporalType):\n """Data type representing a calendar date.\n\n Examples:\n >>> from datetime import date, timedelta\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pa.chunked_array(\n ... [[date(2024, 12, 1) + timedelta(days=d) for d in range(4)]]\n ... 
)\n >>> nw.from_native(s_native, series_only=True).dtype\n Date\n """\n\n\nclass Time(TemporalType):\n """Data type representing the time of day.\n\n Examples:\n >>> import polars as pl\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> import duckdb\n >>> from datetime import time\n >>> data = [time(9, 0), time(9, 1, 10), time(9, 2)]\n >>> ser_pl = pl.Series(data)\n >>> ser_pa = pa.chunked_array([pa.array(data, type=pa.time64("ns"))])\n >>> rel = duckdb.sql(\n ... " SELECT * FROM (VALUES (TIME '12:00:00'), (TIME '14:30:15')) df(t)"\n ... )\n\n >>> nw.from_native(ser_pl, series_only=True).dtype\n Time\n >>> nw.from_native(ser_pa, series_only=True).dtype\n Time\n >>> nw.from_native(rel).schema["t"]\n Time\n """\n\n\nclass Binary(DType):\n """Binary type.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> import pyarrow as pa\n >>> import duckdb\n >>> data = [b"test1", b"test2"]\n >>> ser_pl = pl.Series(data, dtype=pl.Binary)\n >>> ser_pa = pa.chunked_array([pa.array(data, type=pa.binary())])\n >>> rel = duckdb.sql(\n ... "SELECT * FROM (VALUES (BLOB 'test1'), (BLOB 'test2')) AS df(t)"\n ... )\n\n >>> nw.from_native(ser_pl, series_only=True).dtype\n Binary\n >>> nw.from_native(ser_pa, series_only=True).dtype\n Binary\n >>> nw.from_native(rel).schema["t"]\n Binary\n """\n
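\n\n# --- Editor's sketch (not part of the upstream module): a minimal, hedged demonstration of\n# the equality semantics implemented above. `Int64`, `Int32` and `String` are assumed to be\n# the dtype classes defined earlier in this module, as referenced in the docstrings.\nif __name__ == "__main__": # pragma: no cover\n assert List(Int64()) == List # instances compare equal to their (parent) class\n assert List(Int64()) == List(Int64()) # matching inner types\n assert List(Int64()) != List(String()) # differing inner types\n arr = Array(Int32(), (2, 3)) # fixed-size lists nest via `inner`\n assert arr.size == 2 and arr.shape == (2, 3)\n assert Enum(["beluga", "narwhal"]) == Enum(["beluga", "narwhal"]) # compared by categories\n assert Struct({"a": Int64()}).to_schema() == {"a": Int64()}\n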
.venv\Lib\site-packages\narwhals\dtypes.py
dtypes.py
Python
23,320
0.95
0.175711
0.033727
vue-tools
841
2024-04-11T19:32:04.611563
MIT
false
a5fa030018cad8fca6c32799eaf8d87e
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from collections.abc import Collection, Iterable\n\n\nclass NarwhalsError(ValueError):\n """Base class for all Narwhals exceptions."""\n\n\nclass FormattedKeyError(KeyError):\n """KeyError with formatted error message.\n\n Python's `KeyError` has special casing around formatting\n (see https://bugs.python.org/issue2651). Use this class when the error\n message has newlines and other special format characters.\n Needed by https://github.com/tensorflow/tensorflow/issues/36857.\n """\n\n def __init__(self, message: str) -> None:\n self.message = message\n\n def __str__(self) -> str:\n return self.message\n\n\nclass ColumnNotFoundError(FormattedKeyError, NarwhalsError):\n """Exception raised when column name isn't present."""\n\n def __init__(self, message: str) -> None:\n self.message = message\n super().__init__(self.message)\n\n @classmethod\n def from_missing_and_available_column_names(\n cls, missing_columns: Iterable[str], available_columns: Collection[str], /\n ) -> ColumnNotFoundError:\n message = (\n f"The following columns were not found: {sorted(missing_columns)}"\n f"\n\nHint: Did you mean one of these columns: {list(available_columns)}?"\n )\n return ColumnNotFoundError(message)\n\n\nclass ComputeError(NarwhalsError):\n """Exception raised when the underlying computation could not be evaluated."""\n\n\nclass ShapeError(NarwhalsError):\n """Exception raised when trying to perform operations on data structures with incompatible shapes."""\n\n\nclass MultiOutputExpressionError(NarwhalsError):\n """Exception raised when using a multi-output expression in an unsupported context."""\n\n\nclass DuplicateError(NarwhalsError):\n """Exception raised when duplicate column names are encountered."""\n\n\nclass InvalidOperationError(NarwhalsError):\n """Exception raised during invalid operations."""\n\n\nclass InvalidIntoExprError(TypeError, NarwhalsError):\n """Exception raised when an object can't be converted to an expression."""\n\n def __init__(self, message: str) -> None:\n self.message = message\n super().__init__(self.message)\n\n @classmethod\n def from_invalid_type(cls: type, invalid_type: type) -> InvalidIntoExprError:\n message = (\n f"Expected an object which can be converted into an expression, got {invalid_type}\n\n"\n "Hint:\n"\n "- if you were trying to select a column which does not have a string\n"\n " column name, then you should explicitly use `nw.col`.\n"\n " For example, `df.select(nw.col(0))` if you have a column named `0`.\n"\n "- if you were trying to create a new literal column, then you\n"\n " should explicitly use `nw.lit`.\n"\n " For example, `df.select(nw.lit(0))` if you want to create a new\n"\n " column with literal value `0`."\n )\n return InvalidIntoExprError(message)\n\n\nclass AnonymousExprError(NarwhalsError): # pragma: no cover\n """Exception raised when trying to perform operations on anonymous expressions."""\n\n def __init__(self, message: str) -> None:\n self.message = message\n super().__init__(self.message)\n\n @classmethod\n def from_expr_name(cls: type, expr_name: str) -> AnonymousExprError:\n message = (\n f"Anonymous expressions are not supported in `{expr_name}`.\n"\n "Instead of `nw.all()`, try using a named expression, such as "\n "`nw.col('a', 'b')`"\n )\n return AnonymousExprError(message)\n\n\nclass OrderDependentExprError(NarwhalsError):\n """Exception raised when trying to use order-dependent expressions with LazyFrames."""\n\n def __init__(self, message: str) -> None:\n self.message = message\n super().__init__(self.message)\n\n\nclass LengthChangingExprError(NarwhalsError):\n """Exception raised when trying to use an expression which changes length with LazyFrames."""\n\n def __init__(self, message: str) -> None:\n self.message = message\n super().__init__(self.message)\n\n\nclass UnsupportedDTypeError(NarwhalsError):\n """Exception raised when trying to convert to a DType which is not supported by the given backend."""\n\n\nclass NarwhalsUnstableWarning(UserWarning):\n """Warning issued when a method or function is considered unstable in the stable API."""\n
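\n\n# --- Editor's sketch (not part of the upstream module): a minimal, hedged demonstration of\n# how the classmethod constructors above compose their messages, and of how the hierarchy\n# cooperates with the built-in exception types it subclasses.\nif __name__ == "__main__": # pragma: no cover\n err = ColumnNotFoundError.from_missing_and_available_column_names(["c"], ["a", "b"])\n assert "The following columns were not found: ['c']" in str(err)\n # ColumnNotFoundError is both a KeyError (so dict-style lookups can catch it)\n # and a NarwhalsError (so `except NarwhalsError` catches any Narwhals failure).\n assert isinstance(err, KeyError) and isinstance(err, NarwhalsError)\n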
.venv\Lib\site-packages\narwhals\exceptions.py
exceptions.py
Python
4,455
0.95
0.265625
0
python-kit
945
2025-01-16T01:10:16.924321
BSD-3-Clause
false
f29dff464cba06a8de74117a45986a0d
from __future__ import annotations\n\nimport math\nfrom collections.abc import Iterable, Mapping, Sequence\nfrom typing import TYPE_CHECKING, Any, Callable\n\nfrom narwhals._expression_parsing import (\n ExprMetadata,\n apply_n_ary_operation,\n combine_metadata,\n extract_compliant,\n)\nfrom narwhals._utils import (\n _validate_rolling_arguments,\n ensure_type,\n flatten,\n issue_deprecation_warning,\n)\nfrom narwhals.dtypes import _validate_dtype\nfrom narwhals.exceptions import InvalidOperationError\nfrom narwhals.expr_cat import ExprCatNamespace\nfrom narwhals.expr_dt import ExprDateTimeNamespace\nfrom narwhals.expr_list import ExprListNamespace\nfrom narwhals.expr_name import ExprNameNamespace\nfrom narwhals.expr_str import ExprStringNamespace\nfrom narwhals.expr_struct import ExprStructNamespace\nfrom narwhals.translate import to_native\n\nif TYPE_CHECKING:\n from typing import TypeVar\n\n from typing_extensions import Concatenate, ParamSpec, Self, TypeAlias\n\n from narwhals._compliant import CompliantExpr, CompliantNamespace\n from narwhals.dtypes import DType\n from narwhals.typing import (\n ClosedInterval,\n FillNullStrategy,\n IntoDType,\n IntoExpr,\n NonNestedLiteral,\n NumericLiteral,\n RankMethod,\n RollingInterpolationMethod,\n TemporalLiteral,\n )\n\n PS = ParamSpec("PS")\n R = TypeVar("R")\n _ToCompliant: TypeAlias = Callable[\n [CompliantNamespace[Any, Any]], CompliantExpr[Any, Any]\n ]\n\n\nclass Expr:\n def __init__(self, to_compliant_expr: _ToCompliant, metadata: ExprMetadata) -> None:\n # callable from CompliantNamespace to CompliantExpr\n def func(plx: CompliantNamespace[Any, Any]) -> CompliantExpr[Any, Any]:\n result = to_compliant_expr(plx)\n result._metadata = self._metadata\n return result\n\n self._to_compliant_expr: _ToCompliant = func\n self._metadata = metadata\n\n def _with_elementwise_op(self, to_compliant_expr: Callable[[Any], Any]) -> Self:\n return self.__class__(to_compliant_expr, self._metadata.with_elementwise_op())\n\n def _with_aggregation(self, to_compliant_expr: Callable[[Any], Any]) -> Self:\n return self.__class__(to_compliant_expr, self._metadata.with_aggregation())\n\n def _with_orderable_aggregation(\n self, to_compliant_expr: Callable[[Any], Any]\n ) -> Self:\n return self.__class__(\n to_compliant_expr, self._metadata.with_orderable_aggregation()\n )\n\n def _with_orderable_window(self, to_compliant_expr: Callable[[Any], Any]) -> Self:\n return self.__class__(to_compliant_expr, self._metadata.with_orderable_window())\n\n def _with_unorderable_window(self, to_compliant_expr: Callable[[Any], Any]) -> Self:\n return self.__class__(to_compliant_expr, self._metadata.with_unorderable_window())\n\n def _with_filtration(self, to_compliant_expr: Callable[[Any], Any]) -> Self:\n return self.__class__(to_compliant_expr, self._metadata.with_filtration())\n\n def _with_orderable_filtration(self, to_compliant_expr: Callable[[Any], Any]) -> Self:\n return self.__class__(\n to_compliant_expr, self._metadata.with_orderable_filtration()\n )\n\n def __repr__(self) -> str:\n return f"Narwhals Expr\nmetadata: {self._metadata}\n"\n\n def _taxicab_norm(self) -> Self:\n # This is just used to test out the stable api feature in a realistic-ish way.\n # It's not intended to be used.\n return self._with_aggregation(\n lambda plx: self._to_compliant_expr(plx).abs().sum()\n )\n\n # --- convert ---\n def alias(self, name: str) -> Self:\n """Rename the expression.\n\n Arguments:\n name: The new name.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as 
pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2], "b": [4, 5]})\n >>> df = nw.from_native(df_native)\n >>> df.select((nw.col("b") + 10).alias("c"))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | c |\n | 0 14 |\n | 1 15 |\n └──────────────────┘\n """\n # Don't use `_with_elementwise_op` so that `_metadata.last_node` is preserved.\n return self.__class__(\n lambda plx: self._to_compliant_expr(plx).alias(name), self._metadata\n )\n\n def pipe(\n self,\n function: Callable[Concatenate[Self, PS], R],\n *args: PS.args,\n **kwargs: PS.kwargs,\n ) -> R:\n """Pipe function call.\n\n Arguments:\n function: Function to apply.\n args: Positional arguments to pass to function.\n kwargs: Keyword arguments to pass to function.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 3, 4]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(a_piped=nw.col("a").pipe(lambda x: x + 1))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a a_piped |\n | 0 1 2 |\n | 1 2 3 |\n | 2 3 4 |\n | 3 4 5 |\n └──────────────────┘\n """\n return function(self, *args, **kwargs)\n\n def cast(self, dtype: IntoDType) -> Self:\n """Redefine an object's data type.\n\n Arguments:\n dtype: Data type that the object will be cast into.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"foo": [1, 2, 3], "bar": [6.0, 7.0, 8.0]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("foo").cast(nw.Float32), nw.col("bar").cast(nw.UInt8))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | foo bar |\n | 0 1.0 6 |\n | 1 2.0 7 |\n | 2 3.0 8 |\n └──────────────────┘\n """\n _validate_dtype(dtype)\n return self._with_elementwise_op(\n lambda plx: self._to_compliant_expr(plx).cast(dtype)\n )\n\n # --- binary ---\n def __eq__(self, other: Self | Any) -> Self: # type: ignore[override]\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x == y, self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __ne__(self, other: Self | Any) -> Self: # type: ignore[override]\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x != y, self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __and__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x & y, self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __rand__(self, other: Any) -> Self:\n return (self & other).alias("literal") # type: ignore[no-any-return]\n\n def __or__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x | y, self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __ror__(self, other: Any) -> Self:\n return (self | other).alias("literal") # type: ignore[no-any-return]\n\n def __add__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x + y, self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __radd__(self, other: Any) -> Self:\n return (self + other).alias("literal") # type: ignore[no-any-return]\n\n def __sub__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n 
plx, lambda x, y: x - y, self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __rsub__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x.__rsub__(y), self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __truediv__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x / y, self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __rtruediv__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x.__rtruediv__(y), self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __mul__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x * y, self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __rmul__(self, other: Any) -> Self:\n return (self * other).alias("literal") # type: ignore[no-any-return]\n\n def __le__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x <= y, self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __lt__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x < y, self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __gt__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x > y, self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __ge__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x >= y, self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __pow__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x**y, self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __rpow__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x.__rpow__(y), self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __floordiv__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x // y, self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __rfloordiv__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x.__rfloordiv__(y), self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __mod__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x % y, self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n def __rmod__(self, other: Any) -> Self:\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, lambda x, y: x.__rmod__(y), self, other, str_as_lit=True\n ),\n ExprMetadata.from_binary_op(self, other),\n )\n\n # --- unary ---\n def __invert__(self) -> Self:\n return self._with_elementwise_op(\n lambda plx: self._to_compliant_expr(plx).__invert__()\n )\n\n def any(self) -> Self:\n """Return whether any of the values in the column are `True`.\n\n If there are no 
non-null elements, the result is `False`.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [True, False], "b": [True, True]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a", "b").any())\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 True True |\n └──────────────────┘\n """\n return self._with_aggregation(lambda plx: self._to_compliant_expr(plx).any())\n\n def all(self) -> Self:\n """Return whether all values in the column are `True`.\n\n If there are no non-null elements, the result is `True`.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [True, False], "b": [True, True]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a", "b").all())\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 False True |\n └──────────────────┘\n """\n return self._with_aggregation(lambda plx: self._to_compliant_expr(plx).all())\n\n def ewm_mean(\n self,\n *,\n com: float | None = None,\n span: float | None = None,\n half_life: float | None = None,\n alpha: float | None = None,\n adjust: bool = True,\n min_samples: int = 1,\n ignore_nulls: bool = False,\n ) -> Self:\n r"""Compute exponentially-weighted moving average.\n\n Arguments:\n com: Specify decay in terms of center of mass, $\gamma$, with <br> $\alpha = \frac{1}{1+\gamma}\forall\gamma\geq0$\n span: Specify decay in terms of span, $\theta$, with <br> $\alpha = \frac{2}{\theta + 1} \forall \theta \geq 1$\n half_life: Specify decay in terms of half-life, $\tau$, with <br> $\alpha = 1 - \exp \left\{ \frac{ -\ln(2) }{ \tau } \right\} \forall \tau > 0$\n alpha: Specify smoothing factor alpha directly, $0 < \alpha \leq 1$.\n adjust: Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings\n\n - When `adjust=True` (the default) the EW function is calculated\n using weights $w_i = (1 - \alpha)^i$\n - When `adjust=False` the EW function is calculated recursively by\n $$\n y_0=x_0\n $$\n $$\n y_t = (1 - \alpha)y_{t - 1} + \alpha x_t\n $$\n min_samples: Minimum number of observations in window required to have a value, (otherwise result is null).\n ignore_nulls: Ignore missing values when calculating weights.\n\n - When `ignore_nulls=False` (default), weights are based on absolute\n positions.\n For example, the weights of $x_0$ and $x_2$ used in\n calculating the final weighted average of $[x_0, None, x_2]$ are\n $(1-\alpha)^2$ and $1$ if `adjust=True`, and\n $(1-\alpha)^2$ and $\alpha$ if `adjust=False`.\n - When `ignore_nulls=True`, weights are based\n on relative positions. For example, the weights of\n $x_0$ and $x_2$ used in calculating the final weighted\n average of $[x_0, None, x_2]$ are\n $1-\alpha$ and $1$ if `adjust=True`,\n and $1-\alpha$ and $\alpha$ if `adjust=False`.\n\n Returns:\n Expr\n\n Examples:\n >>> import pandas as pd\n >>> import polars as pl\n >>> import narwhals as nw\n >>> from narwhals.typing import IntoFrameT\n >>>\n >>> data = {"a": [1, 2, 3]}\n >>> df_pd = pd.DataFrame(data)\n >>> df_pl = pl.DataFrame(data)\n\n We define a library agnostic function:\n\n >>> def agnostic_ewm_mean(df_native: IntoFrameT) -> IntoFrameT:\n ... df = nw.from_native(df_native)\n ... return df.select(\n ... nw.col("a").ewm_mean(com=1, ignore_nulls=False)\n ... 
).to_native()\n\n We can then pass either pandas or Polars to `agnostic_ewm_mean`:\n\n >>> agnostic_ewm_mean(df_pd)\n a\n 0 1.000000\n 1 1.666667\n 2 2.428571\n\n >>> agnostic_ewm_mean(df_pl) # doctest: +NORMALIZE_WHITESPACE\n shape: (3, 1)\n ┌──────────┐\n │ a │\n │ --- │\n │ f64 │\n ╞══════════╡\n │ 1.0 │\n │ 1.666667 │\n │ 2.428571 │\n └──────────┘\n """\n return self._with_orderable_window(\n lambda plx: self._to_compliant_expr(plx).ewm_mean(\n com=com,\n span=span,\n half_life=half_life,\n alpha=alpha,\n adjust=adjust,\n min_samples=min_samples,\n ignore_nulls=ignore_nulls,\n )\n )\n\n def mean(self) -> Self:\n """Get mean value.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [-1, 0, 1], "b": [2, 4, 6]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a", "b").mean())\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 0.0 4.0 |\n └──────────────────┘\n """\n return self._with_aggregation(lambda plx: self._to_compliant_expr(plx).mean())\n\n def median(self) -> Self:\n """Get median value.\n\n Returns:\n A new expression.\n\n Notes:\n Results might slightly differ across backends due to differences in the underlying algorithms used to compute the median.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 8, 3], "b": [4, 5, 2]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a", "b").median())\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 3.0 4.0 |\n └──────────────────┘\n """\n return self._with_aggregation(lambda plx: self._to_compliant_expr(plx).median())\n\n def std(self, *, ddof: int = 1) -> Self:\n """Get standard deviation.\n\n Arguments:\n ddof: "Delta Degrees of Freedom": the divisor used in the calculation is N - ddof,\n where N represents the number of elements. By default ddof is 1.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [20, 25, 60], "b": [1.5, 1, -1.4]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a", "b").std(ddof=0))\n ┌─────────────────────┐\n | Narwhals DataFrame |\n |---------------------|\n | a b|\n |0 17.79513 1.265789|\n └─────────────────────┘\n """\n return self._with_aggregation(\n lambda plx: self._to_compliant_expr(plx).std(ddof=ddof)\n )\n\n def var(self, *, ddof: int = 1) -> Self:\n """Get variance.\n\n Arguments:\n ddof: "Delta Degrees of Freedom": the divisor used in the calculation is N - ddof,\n where N represents the number of elements. 
By default ddof is 1.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [20, 25, 60], "b": [1.5, 1, -1.4]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a", "b").var(ddof=0))\n ┌───────────────────────┐\n | Narwhals DataFrame |\n |-----------------------|\n | a b|\n |0 316.666667 1.602222|\n └───────────────────────┘\n """\n return self._with_aggregation(\n lambda plx: self._to_compliant_expr(plx).var(ddof=ddof)\n )\n\n def map_batches(\n self,\n function: Callable[[Any], CompliantExpr[Any, Any]],\n return_dtype: DType | None = None,\n ) -> Self:\n """Apply a custom python function to a whole Series or sequence of Series.\n\n The output of this custom function is presumed to be either a Series,\n or a NumPy array (in which case it will be automatically converted into\n a Series).\n\n Arguments:\n function: Function to apply to Series.\n return_dtype: Dtype of the output Series.\n If not set, the dtype will be inferred based on the first non-null value\n that is returned by the function.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... nw.col("a", "b")\n ... .map_batches(lambda s: s.to_numpy() + 1, return_dtype=nw.Float64)\n ... .name.suffix("_mapped")\n ... )\n ┌───────────────────────────┐\n | Narwhals DataFrame |\n |---------------------------|\n | a b a_mapped b_mapped|\n |0 1 4 2.0 5.0|\n |1 2 5 3.0 6.0|\n |2 3 6 4.0 7.0|\n └───────────────────────────┘\n """\n # safest assumptions\n return self._with_orderable_filtration(\n lambda plx: self._to_compliant_expr(plx).map_batches(\n function=function, return_dtype=return_dtype\n )\n )\n\n def skew(self) -> Self:\n """Calculate the sample skewness of a column.\n\n Returns:\n An expression representing the sample skewness of the column.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 1, 2, 10, 100]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a", "b").skew())\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 0.0 1.472427 |\n └──────────────────┘\n """\n return self._with_aggregation(lambda plx: self._to_compliant_expr(plx).skew())\n\n def kurtosis(self) -> Self:\n """Compute the kurtosis (Fisher's definition) without bias correction.\n\n Kurtosis is the fourth central moment divided by the square of the variance.\n The Fisher's definition is used where 3.0 is subtracted from the result to give 0.0 for a normal distribution.\n\n Returns:\n An expression representing the kurtosis (Fisher's definition) without bias correction of the column.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 1, 2, 10, 100]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a", "b").kurtosis())\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 -1.3 0.210657 |\n └──────────────────┘\n """\n return self._with_aggregation(lambda plx: self._to_compliant_expr(plx).kurtosis())\n\n def sum(self) -> Expr:\n """Return the sum value.\n\n If there are no non-null elements, the result is zero.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import duckdb\n >>> import narwhals as nw\n >>> df_native = duckdb.sql("SELECT * FROM 
VALUES (5, 50), (10, 100) df(a, b)")\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a", "b").sum())\n ┌───────────────────┐\n |Narwhals LazyFrame |\n |-------------------|\n |┌────────┬────────┐|\n |│ a │ b │|\n |│ int128 │ int128 │|\n |├────────┼────────┤|\n |│ 15 │ 150 │|\n |└────────┴────────┘|\n └───────────────────┘\n """\n return self._with_aggregation(lambda plx: self._to_compliant_expr(plx).sum())\n\n def min(self) -> Self:\n """Returns the minimum value(s) from a column(s).\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2], "b": [4, 3]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.min("a", "b"))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 1 3 |\n └──────────────────┘\n """\n return self._with_aggregation(lambda plx: self._to_compliant_expr(plx).min())\n\n def max(self) -> Self:\n """Returns the maximum value(s) from a column(s).\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [10, 20], "b": [50, 100]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.max("a", "b"))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 20 100 |\n └──────────────────┘\n """\n return self._with_aggregation(lambda plx: self._to_compliant_expr(plx).max())\n\n def arg_min(self) -> Self:\n """Returns the index of the minimum value.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [10, 20], "b": [150, 100]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a", "b").arg_min().name.suffix("_arg_min"))\n ┌───────────────────────┐\n | Narwhals DataFrame |\n |-----------------------|\n | a_arg_min b_arg_min|\n |0 0 1|\n └───────────────────────┘\n """\n return self._with_orderable_aggregation(\n lambda plx: self._to_compliant_expr(plx).arg_min()\n )\n\n def arg_max(self) -> Self:\n """Returns the index of the maximum value.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [10, 20], "b": [150, 100]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a", "b").arg_max().name.suffix("_arg_max"))\n ┌───────────────────────┐\n | Narwhals DataFrame |\n |-----------------------|\n | a_arg_max b_arg_max|\n |0 1 0|\n └───────────────────────┘\n """\n return self._with_orderable_aggregation(\n lambda plx: self._to_compliant_expr(plx).arg_max()\n )\n\n def count(self) -> Self:\n """Returns the number of non-null elements in the column.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 3], "b": [None, 4, 4]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.all().count())\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 3 2 |\n └──────────────────┘\n """\n return self._with_aggregation(lambda plx: self._to_compliant_expr(plx).count())\n\n def n_unique(self) -> Self:\n """Returns count of unique values.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 1, 3, 3, 5]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a", "b").n_unique())\n ┌──────────────────┐\n |Narwhals DataFrame|\n 
|------------------|\n | a b |\n | 0 5 3 |\n └──────────────────┘\n """\n return self._with_aggregation(lambda plx: self._to_compliant_expr(plx).n_unique())\n\n def unique(self) -> Self:\n """Return unique values of this expression.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 1, 3, 5, 5], "b": [2, 4, 4, 6, 6]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a", "b").unique().sum())\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 9 12 |\n └──────────────────┘\n """\n return self._with_filtration(lambda plx: self._to_compliant_expr(plx).unique())\n\n def abs(self) -> Self:\n """Return absolute value of each element.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, -2], "b": [-3, 4]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(nw.col("a", "b").abs().name.suffix("_abs"))\n ┌─────────────────────┐\n | Narwhals DataFrame |\n |---------------------|\n | a b a_abs b_abs|\n |0 1 -3 1 3|\n |1 -2 4 2 4|\n └─────────────────────┘\n """\n return self._with_elementwise_op(lambda plx: self._to_compliant_expr(plx).abs())\n\n def cum_sum(self, *, reverse: bool = False) -> Self:\n """Return cumulative sum.\n\n Info:\n For lazy backends, this operation must be followed by `Expr.over` with\n `order_by` specified, see [order-dependence](../concepts/order_dependence.md).\n\n Arguments:\n reverse: Reverse the operation.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 1, 3, 5, 5], "b": [2, 4, 4, 6, 6]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(a_cum_sum=nw.col("a").cum_sum())\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b a_cum_sum|\n |0 1 2 1|\n |1 1 4 2|\n |2 3 4 5|\n |3 5 6 10|\n |4 5 6 15|\n └──────────────────┘\n """\n return self._with_orderable_window(\n lambda plx: self._to_compliant_expr(plx).cum_sum(reverse=reverse)\n )\n\n def diff(self) -> Self:\n """Returns the difference between each element and the previous one.\n\n Info:\n For lazy backends, this operation must be followed by `Expr.over` with\n `order_by` specified, see [order-dependence](../concepts/order_dependence.md).\n\n Returns:\n A new expression.\n\n Notes:\n pandas may change the dtype here, for example when introducing missing\n values in an integer column. To ensure that the dtype doesn't change,\n you may want to use `fill_null` and `cast`. 
For example, to calculate\n the diff and fill missing values with `0` in an Int64 column, you could\n do:\n\n nw.col("a").diff().fill_null(0).cast(nw.Int64)\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"a": [1, 1, 3, 5, 5]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(a_diff=nw.col("a").diff())\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | shape: (5, 2) |\n | ┌─────┬────────┐ |\n | │ a ┆ a_diff │ |\n | │ --- ┆ --- │ |\n | │ i64 ┆ i64 │ |\n | ╞═════╪════════╡ |\n | │ 1 ┆ null │ |\n | │ 1 ┆ 0 │ |\n | │ 3 ┆ 2 │ |\n | │ 5 ┆ 2 │ |\n | │ 5 ┆ 0 │ |\n | └─────┴────────┘ |\n └──────────────────┘\n """\n return self._with_orderable_window(\n lambda plx: self._to_compliant_expr(plx).diff()\n )\n\n def shift(self, n: int) -> Self:\n """Shift values by `n` positions.\n\n Info:\n For lazy backends, this operation must be followed by `Expr.over` with\n `order_by` specified, see [order-dependence](../concepts/order_dependence.md).\n\n Arguments:\n n: Number of positions to shift values by.\n\n Returns:\n A new expression.\n\n Notes:\n pandas may change the dtype here, for example when introducing missing\n values in an integer column. To ensure that the dtype doesn't change,\n you may want to use `fill_null` and `cast`. For example, to shift\n and fill missing values with `0` in an Int64 column, you could\n do:\n\n nw.col("a").shift(1).fill_null(0).cast(nw.Int64)\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"a": [1, 1, 3, 5, 5]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(a_shift=nw.col("a").shift(n=1))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n |shape: (5, 2) |\n |┌─────┬─────────┐ |\n |│ a ┆ a_shift │ |\n |│ --- ┆ --- │ |\n |│ i64 ┆ i64 │ |\n |╞═════╪═════════╡ |\n |│ 1 ┆ null │ |\n |│ 1 ┆ 1 │ |\n |│ 3 ┆ 1 │ |\n |│ 5 ┆ 3 │ |\n |│ 5 ┆ 5 │ |\n |└─────┴─────────┘ |\n └──────────────────┘\n """\n ensure_type(n, int, param_name="n")\n\n return self._with_orderable_window(\n lambda plx: self._to_compliant_expr(plx).shift(n)\n )\n\n def replace_strict(\n self,\n old: Sequence[Any] | Mapping[Any, Any],\n new: Sequence[Any] | None = None,\n *,\n return_dtype: IntoDType | None = None,\n ) -> Self:\n """Replace all values by different values.\n\n This function must replace all non-null input values (else it raises an error).\n\n Arguments:\n old: Sequence of values to replace. It also accepts a mapping of values to\n their replacement as syntactic sugar for\n `replace_strict(old=list(mapping.keys()), new=list(mapping.values()))`.\n new: Sequence of values to replace by. Length must match the length of `old`.\n return_dtype: The data type of the resulting expression. If set to `None`\n (default), the data type is determined automatically based on the other\n inputs.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [3, 0, 1, 2]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... b=nw.col("a").replace_strict(\n ... [0, 1, 2, 3],\n ... ["zero", "one", "two", "three"],\n ... return_dtype=nw.String,\n ... )\n ... 
)\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 3 three |\n | 1 0 zero |\n | 2 1 one |\n | 3 2 two |\n └──────────────────┘\n """\n if new is None:\n if not isinstance(old, Mapping):\n msg = "`new` argument is required if `old` argument is not a Mapping type"\n raise TypeError(msg)\n\n new = list(old.values())\n old = list(old.keys())\n\n return self._with_elementwise_op(\n lambda plx: self._to_compliant_expr(plx).replace_strict(\n old, new, return_dtype=return_dtype\n )\n )\n\n def sort(self, *, descending: bool = False, nulls_last: bool = False) -> Self:\n """Sort this column. Place null values first.\n\n Warning:\n `Expr.sort` is deprecated and will be removed in a future version.\n Hint: instead of `df.select(nw.col('a').sort())`, use\n `df.select(nw.col('a')).sort()` instead.\n Note: this will remain available in `narwhals.stable.v1`.\n See [stable api](../backcompat.md/) for more information.\n\n Arguments:\n descending: Sort in descending order.\n nulls_last: Place null values last instead of first.\n\n Returns:\n A new expression.\n """\n msg = (\n "`Expr.sort` is deprecated and will be removed in a future version.\n\n"\n "Hint: instead of `df.select(nw.col('a').sort())`, use `df.select(nw.col('a')).sort()`.\n\n"\n "Note: this will remain available in `narwhals.stable.v1`.\n"\n "See https://narwhals-dev.github.io/narwhals/backcompat/ for more information.\n"\n )\n issue_deprecation_warning(msg, _version="1.23.0")\n return self._with_orderable_window(\n lambda plx: self._to_compliant_expr(plx).sort(\n descending=descending, nulls_last=nulls_last\n )\n )\n\n # --- transform ---\n def is_between(\n self,\n lower_bound: Any | IntoExpr,\n upper_bound: Any | IntoExpr,\n closed: ClosedInterval = "both",\n ) -> Self:\n """Check if this expression is between the given lower and upper bounds.\n\n Arguments:\n lower_bound: Lower bound value. String literals are interpreted as column names.\n upper_bound: Upper bound value. 
String literals are interpreted as column names.\n closed: Define which sides of the interval are closed (inclusive).\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 3, 4, 5]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(b=nw.col("a").is_between(2, 4, "right"))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 1 False |\n | 1 2 False |\n | 2 3 True |\n | 3 4 True |\n | 4 5 False |\n └──────────────────┘\n """\n\n def func(\n compliant_expr: CompliantExpr[Any, Any],\n lb: CompliantExpr[Any, Any],\n ub: CompliantExpr[Any, Any],\n ) -> CompliantExpr[Any, Any]:\n if closed == "left":\n return (compliant_expr >= lb) & (compliant_expr < ub)\n elif closed == "right":\n return (compliant_expr > lb) & (compliant_expr <= ub)\n elif closed == "none":\n return (compliant_expr > lb) & (compliant_expr < ub)\n return (compliant_expr >= lb) & (compliant_expr <= ub)\n\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx, func, self, lower_bound, upper_bound, str_as_lit=False\n ),\n combine_metadata(\n self,\n lower_bound,\n upper_bound,\n str_as_lit=False,\n allow_multi_output=False,\n to_single_output=False,\n ),\n )\n\n def is_in(self, other: Any) -> Self:\n """Check if elements of this expression are present in the other iterable.\n\n Arguments:\n other: iterable\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 9, 10]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(b=nw.col("a").is_in([1, 2]))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 1 True |\n | 1 2 True |\n | 2 9 False |\n | 3 10 False |\n └──────────────────┘\n """\n if isinstance(other, Iterable) and not isinstance(other, (str, bytes)):\n return self._with_elementwise_op(\n lambda plx: self._to_compliant_expr(plx).is_in(\n to_native(other, pass_through=True)\n )\n )\n else:\n msg = "Narwhals `is_in` doesn't accept expressions as an argument, as opposed to Polars. You should provide an iterable instead."\n raise NotImplementedError(msg)\n\n def filter(self, *predicates: Any) -> Self:\n """Filters elements based on a condition, returning a new expression.\n\n Arguments:\n predicates: Conditions to filter by (which get ANDed together).\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame(\n ... {"a": [2, 3, 4, 5, 6, 7], "b": [10, 11, 12, 13, 14, 15]}\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.select(\n ... nw.col("a").filter(nw.col("a") > 4),\n ... nw.col("b").filter(nw.col("b") < 13),\n ... 
)\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 3 5 10 |\n | 4 6 11 |\n | 5 7 12 |\n └──────────────────┘\n """\n flat_predicates = flatten(predicates)\n metadata = combine_metadata(\n self,\n *flat_predicates,\n str_as_lit=False,\n allow_multi_output=True,\n to_single_output=False,\n ).with_filtration()\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx,\n lambda *exprs: exprs[0].filter(*exprs[1:]),\n self,\n *flat_predicates,\n str_as_lit=False,\n ),\n metadata,\n )\n\n def is_null(self) -> Self:\n """Returns a boolean Series indicating which values are null.\n\n Returns:\n A new expression.\n\n Notes:\n pandas handles null values differently from Polars and PyArrow.\n See [null_handling](../concepts/null_handling.md/)\n for reference.\n\n Examples:\n >>> import duckdb\n >>> import narwhals as nw\n >>> df_native = duckdb.sql(\n ... "SELECT * FROM VALUES (null, CAST('NaN' AS DOUBLE)), (2, 2.) df(a, b)"\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... a_is_null=nw.col("a").is_null(), b_is_null=nw.col("b").is_null()\n ... )\n ┌──────────────────────────────────────────┐\n | Narwhals LazyFrame |\n |------------------------------------------|\n |┌───────┬────────┬───────────┬───────────┐|\n |│ a │ b │ a_is_null │ b_is_null │|\n |│ int32 │ double │ boolean │ boolean │|\n |├───────┼────────┼───────────┼───────────┤|\n |│ NULL │ nan │ true │ false │|\n |│ 2 │ 2.0 │ false │ false │|\n |└───────┴────────┴───────────┴───────────┘|\n └──────────────────────────────────────────┘\n """\n return self._with_elementwise_op(\n lambda plx: self._to_compliant_expr(plx).is_null()\n )\n\n def is_nan(self) -> Self:\n """Indicate which values are NaN.\n\n Returns:\n A new expression.\n\n Notes:\n pandas handles null values differently from Polars and PyArrow.\n See [null_handling](../concepts/null_handling.md/)\n for reference.\n\n Examples:\n >>> import duckdb\n >>> import narwhals as nw\n >>> df_native = duckdb.sql(\n ... "SELECT * FROM VALUES (null, CAST('NaN' AS DOUBLE)), (2, 2.) df(a, b)"\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... a_is_nan=nw.col("a").is_nan(), b_is_nan=nw.col("b").is_nan()\n ... 
)\n ┌────────────────────────────────────────┐\n | Narwhals LazyFrame |\n |----------------------------------------|\n |┌───────┬────────┬──────────┬──────────┐|\n |│ a │ b │ a_is_nan │ b_is_nan │|\n |│ int32 │ double │ boolean │ boolean │|\n |├───────┼────────┼──────────┼──────────┤|\n |│ NULL │ nan │ NULL │ true │|\n |│ 2 │ 2.0 │ false │ false │|\n |└───────┴────────┴──────────┴──────────┘|\n └────────────────────────────────────────┘\n """\n return self._with_elementwise_op(\n lambda plx: self._to_compliant_expr(plx).is_nan()\n )\n\n def arg_true(self) -> Self:\n """Find elements where boolean expression is True.\n\n Returns:\n A new expression.\n """\n msg = (\n "`Expr.arg_true` is deprecated and will be removed in a future version.\n\n"\n "Note: this will remain available in `narwhals.stable.v1`.\n"\n "See https://narwhals-dev.github.io/narwhals/backcompat/ for more information.\n"\n )\n issue_deprecation_warning(msg, _version="1.23.0")\n return self._with_filtration(lambda plx: self._to_compliant_expr(plx).arg_true())\n\n def fill_null(\n self,\n value: Expr | NonNestedLiteral = None,\n strategy: FillNullStrategy | None = None,\n limit: int | None = None,\n ) -> Self:\n """Fill null values with given value.\n\n Arguments:\n value: Value or expression used to fill null values.\n strategy: Strategy used to fill null values.\n limit: Number of consecutive null values to fill when using the 'forward' or 'backward' strategy.\n\n Returns:\n A new expression.\n\n Notes:\n - pandas handles null values differently from other libraries.\n See [null_handling](../concepts/null_handling.md/)\n for reference.\n - For pandas Series of `object` dtype, `fill_null` will not automatically change the\n Series' dtype as pandas used to do. Explicitly call `cast` if you want the dtype to change.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame(\n ... {\n ... "a": [2, None, None, 3],\n ... "b": [2.0, float("nan"), float("nan"), 3.0],\n ... "c": [1, 2, 3, 4],\n ... }\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... nw.col("a", "b").fill_null(0).name.suffix("_filled"),\n ... nw.col("a").fill_null(nw.col("c")).name.suffix("_filled_with_c"),\n ... )\n ┌────────────────────────────────────────────────────────────┐\n | Narwhals DataFrame |\n |------------------------------------------------------------|\n |shape: (4, 6) |\n |┌──────┬─────┬─────┬──────────┬──────────┬─────────────────┐|\n |│ a ┆ b ┆ c ┆ a_filled ┆ b_filled ┆ a_filled_with_c │|\n |│ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- │|\n |│ i64 ┆ f64 ┆ i64 ┆ i64 ┆ f64 ┆ i64 │|\n |╞══════╪═════╪═════╪══════════╪══════════╪═════════════════╡|\n |│ 2 ┆ 2.0 ┆ 1 ┆ 2 ┆ 2.0 ┆ 2 │|\n |│ null ┆ NaN ┆ 2 ┆ 0 ┆ NaN ┆ 2 │|\n |│ null ┆ NaN ┆ 3 ┆ 0 ┆ NaN ┆ 3 │|\n |│ 3 ┆ 3.0 ┆ 4 ┆ 3 ┆ 3.0 ┆ 3 │|\n |└──────┴─────┴─────┴──────────┴──────────┴─────────────────┘|\n └────────────────────────────────────────────────────────────┘\n\n Using a strategy:\n\n >>> df.select(\n ... nw.col("a", "b"),\n ... nw.col("a", "b")\n ... .fill_null(strategy="forward", limit=1)\n ... .name.suffix("_nulls_forward_filled"),\n ... 
)\n ┌────────────────────────────────────────────────────────────────┐\n | Narwhals DataFrame |\n |----------------------------------------------------------------|\n |shape: (4, 4) |\n |┌──────┬─────┬────────────────────────┬────────────────────────┐|\n |│ a ┆ b ┆ a_nulls_forward_filled ┆ b_nulls_forward_filled │|\n |│ --- ┆ --- ┆ --- ┆ --- │|\n |│ i64 ┆ f64 ┆ i64 ┆ f64 │|\n |╞══════╪═════╪════════════════════════╪════════════════════════╡|\n |│ 2 ┆ 2.0 ┆ 2 ┆ 2.0 │|\n |│ null ┆ NaN ┆ 2 ┆ NaN │|\n |│ null ┆ NaN ┆ null ┆ NaN │|\n |│ 3 ┆ 3.0 ┆ 3 ┆ 3.0 │|\n |└──────┴─────┴────────────────────────┴────────────────────────┘|\n └────────────────────────────────────────────────────────────────┘\n """\n if value is not None and strategy is not None:\n msg = "cannot specify both `value` and `strategy`"\n raise ValueError(msg)\n if value is None and strategy is None:\n msg = "must specify either a fill `value` or `strategy`"\n raise ValueError(msg)\n if strategy is not None and strategy not in {"forward", "backward"}:\n msg = f"strategy not supported: {strategy}"\n raise ValueError(msg)\n\n return self.__class__(\n lambda plx: self._to_compliant_expr(plx).fill_null(\n value=extract_compliant(plx, value, str_as_lit=True),\n strategy=strategy,\n limit=limit,\n ),\n self._metadata.with_orderable_window()\n if strategy is not None\n else self._metadata,\n )\n\n # --- partial reduction ---\n def drop_nulls(self) -> Self:\n """Drop null values.\n\n Returns:\n A new expression.\n\n Notes:\n pandas handles null values differently from Polars and PyArrow.\n See [null_handling](../concepts/null_handling.md/)\n for reference.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"a": [2.0, 4.0, float("nan"), 3.0, None, 5.0]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a").drop_nulls())\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | shape: (5, 1) |\n | ┌─────┐ |\n | │ a │ |\n | │ --- │ |\n | │ f64 │ |\n | ╞═════╡ |\n | │ 2.0 │ |\n | │ 4.0 │ |\n | │ NaN │ |\n | │ 3.0 │ |\n | │ 5.0 │ |\n | └─────┘ |\n └──────────────────┘\n """\n return self._with_filtration(\n lambda plx: self._to_compliant_expr(plx).drop_nulls()\n )\n\n def sample(\n self,\n n: int | None = None,\n *,\n fraction: float | None = None,\n with_replacement: bool = False,\n seed: int | None = None,\n ) -> Self:\n """Sample randomly from this expression.\n\n Warning:\n `Expr.sample` is deprecated and will be removed in a future version.\n Hint: instead of `df.select(nw.col('a').sample())`, use\n `df.select(nw.col('a')).sample()` instead.\n Note: this will remain available in `narwhals.stable.v1`.\n See [stable api](../backcompat.md/) for more information.\n\n Arguments:\n n: Number of items to return. Cannot be used with fraction.\n fraction: Fraction of items to return. Cannot be used with n.\n with_replacement: Allow values to be sampled more than once.\n seed: Seed for the random number generator. 
If set to None (default), a random\n seed is generated for each sample operation.\n\n Returns:\n A new expression.\n """\n msg = (\n "`Expr.sample` is deprecated and will be removed in a future version.\n\n"\n "Hint: instead of `df.select(nw.col('a').sample())`, use `df.select(nw.col('a')).sample()`.\n\n"\n "Note: this will remain available in `narwhals.stable.v1`.\n"\n "See https://narwhals-dev.github.io/narwhals/backcompat/ for more information.\n"\n )\n issue_deprecation_warning(msg, _version="1.23.0")\n return self._with_filtration(\n lambda plx: self._to_compliant_expr(plx).sample(\n n, fraction=fraction, with_replacement=with_replacement, seed=seed\n )\n )\n\n def over(\n self,\n *partition_by: str | Sequence[str],\n order_by: str | Sequence[str] | None = None,\n ) -> Self:\n """Compute expressions over the given groups (optionally with given order).\n\n Arguments:\n partition_by: Names of columns to compute window expression over.\n Must be names of columns, as opposed to expressions -\n so, this is a bit less flexible than Polars' `Expr.over`.\n order_by: Column(s) to order window functions by.\n For lazy backends, this argument is required when `over` is applied\n to order-dependent functions, see [order-dependence](../concepts/order_dependence.md).\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 4], "b": ["x", "x", "y"]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(a_min_per_group=nw.col("a").min().over("b"))\n ┌────────────────────────┐\n | Narwhals DataFrame |\n |------------------------|\n | a b a_min_per_group|\n |0 1 x 1|\n |1 2 x 1|\n |2 4 y 4|\n └────────────────────────┘\n\n Cumulative operations are also supported, but (currently) only for\n pandas and Polars:\n\n >>> df.with_columns(a_cum_sum_per_group=nw.col("a").cum_sum().over("b"))\n ┌────────────────────────────┐\n | Narwhals DataFrame |\n |----------------------------|\n | a b a_cum_sum_per_group|\n |0 1 x 1|\n |1 2 x 3|\n |2 4 y 4|\n └────────────────────────────┘\n """\n flat_partition_by = flatten(partition_by)\n flat_order_by = [order_by] if isinstance(order_by, str) else (order_by or [])\n if not flat_partition_by and not flat_order_by: # pragma: no cover\n msg = "At least one of `partition_by` or `order_by` must be specified."\n raise ValueError(msg)\n\n current_meta = self._metadata\n if flat_order_by:\n next_meta = current_meta.with_ordered_over()\n elif not flat_partition_by: # pragma: no cover\n msg = "At least one of `partition_by` or `order_by` must be specified."\n raise InvalidOperationError(msg)\n else:\n next_meta = current_meta.with_partitioned_over()\n\n return self.__class__(\n lambda plx: self._to_compliant_expr(plx).over(\n flat_partition_by, flat_order_by\n ),\n next_meta,\n )\n\n def is_duplicated(self) -> Self:\n r"""Return a boolean mask indicating duplicated values.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 3, 1], "b": ["a", "a", "b", "c"]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(nw.all().is_duplicated().name.suffix("_is_duplicated"))\n ┌─────────────────────────────────────────┐\n | Narwhals DataFrame |\n |-----------------------------------------|\n | a b a_is_duplicated b_is_duplicated|\n |0 1 a True True|\n |1 2 a False True|\n |2 3 b False False|\n |3 1 c True False|\n └─────────────────────────────────────────┘\n """\n return ~self.is_unique()\n\n def 
is_unique(self) -> Self:\n r"""Return a boolean mask indicating unique values.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 3, 1], "b": ["a", "a", "b", "c"]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(nw.all().is_unique().name.suffix("_is_unique"))\n ┌─────────────────────────────────┐\n | Narwhals DataFrame |\n |---------------------------------|\n | a b a_is_unique b_is_unique|\n |0 1 a False False|\n |1 2 a True False|\n |2 3 b True True|\n |3 1 c False True|\n └─────────────────────────────────┘\n """\n return self._with_unorderable_window(\n lambda plx: self._to_compliant_expr(plx).is_unique()\n )\n\n def null_count(self) -> Self:\n r"""Count null values.\n\n Returns:\n A new expression.\n\n Notes:\n pandas handles null values differently from Polars and PyArrow.\n See [null_handling](../concepts/null_handling.md/)\n for reference.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame(\n ... {"a": [1, 2, None, 1], "b": ["a", None, "b", None]}\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.all().null_count())\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 1 2 |\n └──────────────────┘\n """\n return self._with_aggregation(\n lambda plx: self._to_compliant_expr(plx).null_count()\n )\n\n def is_first_distinct(self) -> Self:\n r"""Return a boolean mask indicating the first occurrence of each distinct value.\n\n Info:\n For lazy backends, this operation must be followed by `Expr.over` with\n `order_by` specified, see [order-dependence](../concepts/order_dependence.md).\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 3, 1], "b": ["a", "a", "b", "c"]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... nw.all().is_first_distinct().name.suffix("_is_first_distinct")\n ... )\n ┌─────────────────────────────────────────────────┐\n | Narwhals DataFrame |\n |-------------------------------------------------|\n | a b a_is_first_distinct b_is_first_distinct|\n |0 1 a True True|\n |1 2 a True False|\n |2 3 b True True|\n |3 1 c False True|\n └─────────────────────────────────────────────────┘\n """\n return self._with_orderable_window(\n lambda plx: self._to_compliant_expr(plx).is_first_distinct()\n )\n\n def is_last_distinct(self) -> Self:\n r"""Return a boolean mask indicating the last occurrence of each distinct value.\n\n Info:\n For lazy backends, this operation must be followed by `Expr.over` with\n `order_by` specified, see [order-dependence](../concepts/order_dependence.md).\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 3, 1], "b": ["a", "a", "b", "c"]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... nw.all().is_last_distinct().name.suffix("_is_last_distinct")\n ... 
)\n ┌───────────────────────────────────────────────┐\n | Narwhals DataFrame |\n |-----------------------------------------------|\n | a b a_is_last_distinct b_is_last_distinct|\n |0 1 a False False|\n |1 2 a True True|\n |2 3 b True True|\n |3 1 c True True|\n └───────────────────────────────────────────────┘\n """\n return self._with_orderable_window(\n lambda plx: self._to_compliant_expr(plx).is_last_distinct()\n )\n\n def quantile(\n self, quantile: float, interpolation: RollingInterpolationMethod\n ) -> Self:\n r"""Get quantile value.\n\n Arguments:\n quantile: Quantile between 0.0 and 1.0.\n interpolation: Interpolation method.\n\n Returns:\n A new expression.\n\n Note:\n - pandas and Polars may have implementation differences for a given interpolation method.\n - [dask](https://docs.dask.org/en/stable/generated/dask.dataframe.Series.quantile.html) has\n its own method to approximate quantiles, and it does not implement 'nearest', 'higher',\n 'lower', or 'midpoint' as interpolation methods - use 'linear', which is the closest to\n Dask's native method.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame(\n ... {"a": list(range(50)), "b": list(range(50, 100))}\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a", "b").quantile(0.5, interpolation="linear"))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 24.5 74.5 |\n └──────────────────┘\n """\n return self._with_aggregation(\n lambda plx: self._to_compliant_expr(plx).quantile(quantile, interpolation)\n )\n\n def head(self, n: int = 10) -> Self:\n r"""Get the first `n` rows.\n\n Warning:\n `Expr.head` is deprecated and will be removed in a future version.\n Hint: instead of `df.select(nw.col('a').head())`, use\n `df.select(nw.col('a')).head()`.\n Note: this will remain available in `narwhals.stable.v1`.\n See [stable api](../backcompat.md/) for more information.\n\n Arguments:\n n: Number of rows to return.\n\n Returns:\n A new expression.\n """\n msg = (\n "`Expr.head` is deprecated and will be removed in a future version.\n\n"\n "Hint: instead of `df.select(nw.col('a').head())`, use `df.select(nw.col('a')).head()`.\n\n"\n "Note: this will remain available in `narwhals.stable.v1`.\n"\n "See https://narwhals-dev.github.io/narwhals/backcompat/ for more information.\n"\n )\n issue_deprecation_warning(msg, _version="1.23.0")\n return self._with_orderable_filtration(\n lambda plx: self._to_compliant_expr(plx).head(n)\n )\n\n def tail(self, n: int = 10) -> Self:\n r"""Get the last `n` rows.\n\n Warning:\n `Expr.tail` is deprecated and will be removed in a future version.\n Hint: instead of `df.select(nw.col('a').tail())`, use\n `df.select(nw.col('a')).tail()`.\n Note: this will remain available in `narwhals.stable.v1`.\n See [stable api](../backcompat.md/) for more information.\n\n Arguments:\n n: Number of rows to return.\n\n Returns:\n A new expression.\n """\n msg = (\n "`Expr.tail` is deprecated and will be removed in a future version.\n\n"\n "Hint: instead of `df.select(nw.col('a').tail())`, use `df.select(nw.col('a')).tail()`.\n\n"\n "Note: this will remain available in `narwhals.stable.v1`.\n"\n "See https://narwhals-dev.github.io/narwhals/backcompat/ for more information.\n"\n )\n issue_deprecation_warning(msg, _version="1.23.0")\n return self._with_filtration(lambda plx: self._to_compliant_expr(plx).tail(n))\n\n def round(self, decimals: int = 0) -> Self:\n r"""Round underlying floating point data by `decimals` 
digits.\n\n Arguments:\n decimals: Number of decimals to round by.\n\n Returns:\n A new expression.\n\n Notes:\n For values exactly halfway between rounded decimal values, pandas behaves differently from Polars and Arrow.\n\n pandas rounds to the nearest even value (e.g. -0.5 and 0.5 round to 0.0, 1.5 and 2.5 round to 2.0, 3.5 and\n 4.5 to 4.0, etc.).\n\n Polars and Arrow round away from 0 (e.g. -0.5 to -1.0, 0.5 to 1.0, 1.5 to 2.0, 2.5 to 3.0, etc.).\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1.12345, 2.56789, 3.901234]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(a_rounded=nw.col("a").round(1))\n ┌──────────────────────┐\n | Narwhals DataFrame |\n |----------------------|\n | a a_rounded|\n |0 1.123450 1.1|\n |1 2.567890 2.6|\n |2 3.901234 3.9|\n └──────────────────────┘\n """\n return self._with_elementwise_op(\n lambda plx: self._to_compliant_expr(plx).round(decimals)\n )\n\n def len(self) -> Self:\n r"""Return the number of elements in the column.\n\n Null values count towards the total.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": ["x", "y", "z"], "b": [1, 2, 1]})\n >>> df = nw.from_native(df_native)\n >>> df.select(\n ... nw.col("a").filter(nw.col("b") == 1).len().alias("a1"),\n ... nw.col("a").filter(nw.col("b") == 2).len().alias("a2"),\n ... )\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a1 a2 |\n | 0 2 1 |\n └──────────────────┘\n """\n return self._with_aggregation(lambda plx: self._to_compliant_expr(plx).len())\n\n def gather_every(self, n: int, offset: int = 0) -> Self:\n r"""Take every nth value in the Series and return it as a new Series.\n\n Warning:\n `Expr.gather_every` is deprecated and will be removed in a future version.\n Hint: instead of `df.select(nw.col('a').gather_every())`, use\n `df.select(nw.col('a')).gather_every()`.\n Note: this will remain available in `narwhals.stable.v1`.\n See [stable api](../backcompat.md/) for more information.\n\n Arguments:\n n: Gather every *n*-th row.\n offset: Starting index.\n\n Returns:\n A new expression.\n """\n msg = (\n "`Expr.gather_every` is deprecated and will be removed in a future version.\n\n"\n "Hint: instead of `df.select(nw.col('a').gather_every())`, use `df.select(nw.col('a')).gather_every()`.\n\n"\n "Note: this will remain available in `narwhals.stable.v1`.\n"\n "See https://narwhals-dev.github.io/narwhals/backcompat/ for more information.\n"\n )\n issue_deprecation_warning(msg, _version="1.23.0")\n return self._with_filtration(\n lambda plx: self._to_compliant_expr(plx).gather_every(n=n, offset=offset)\n )\n\n def clip(\n self,\n lower_bound: IntoExpr | NumericLiteral | TemporalLiteral | None = None,\n upper_bound: IntoExpr | NumericLiteral | TemporalLiteral | None = None,\n ) -> Self:\n r"""Clip values in the Series.\n\n Arguments:\n lower_bound: Lower bound value. String literals are treated as column names.\n upper_bound: Upper bound value. 
String literals are treated as column names.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 3]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(a_clipped=nw.col("a").clip(-1, 3))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a a_clipped |\n | 0 1 1 |\n | 1 2 2 |\n | 2 3 3 |\n └──────────────────┘\n """\n return self.__class__(\n lambda plx: apply_n_ary_operation(\n plx,\n lambda *exprs: exprs[0].clip(\n exprs[1] if lower_bound is not None else None,\n exprs[2] if upper_bound is not None else None,\n ),\n self,\n lower_bound,\n upper_bound,\n str_as_lit=False,\n ),\n combine_metadata(\n self,\n lower_bound,\n upper_bound,\n str_as_lit=False,\n allow_multi_output=False,\n to_single_output=False,\n ),\n )\n\n def mode(self) -> Self:\n r"""Compute the most occurring value(s).\n\n Can return multiple values.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 1, 2, 3], "b": [1, 1, 2, 2]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a").mode()).sort("a")\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a |\n | 0 1 |\n └──────────────────┘\n """\n return self._with_filtration(lambda plx: self._to_compliant_expr(plx).mode())\n\n def is_finite(self) -> Self:\n """Returns boolean values indicating which original values are finite.\n\n Warning:\n pandas handles null values differently from Polars and PyArrow.\n See [null_handling](../concepts/null_handling.md/)\n for reference.\n `is_finite` will return False for NaN and Null's in the Dask and\n pandas non-nullable backend, while for Polars, PyArrow and pandas\n nullable backends null values are kept as such.\n\n Returns:\n Expression of `Boolean` data type.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"a": [float("nan"), float("inf"), 2.0, None]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(a_is_finite=nw.col("a").is_finite())\n ┌──────────────────────┐\n | Narwhals DataFrame |\n |----------------------|\n |shape: (4, 2) |\n |┌──────┬─────────────┐|\n |│ a ┆ a_is_finite │|\n |│ --- ┆ --- │|\n |│ f64 ┆ bool │|\n |╞══════╪═════════════╡|\n |│ NaN ┆ false │|\n |│ inf ┆ false │|\n |│ 2.0 ┆ true │|\n |│ null ┆ null │|\n |└──────┴─────────────┘|\n └──────────────────────┘\n """\n return self._with_elementwise_op(\n lambda plx: self._to_compliant_expr(plx).is_finite()\n )\n\n def cum_count(self, *, reverse: bool = False) -> Self:\n r"""Return the cumulative count of the non-null values in the column.\n\n Info:\n For lazy backends, this operation must be followed by `Expr.over` with\n `order_by` specified, see [order-dependence](../concepts/order_dependence.md).\n\n Arguments:\n reverse: reverse the operation\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": ["x", "k", None, "d"]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... nw.col("a").cum_count().alias("a_cum_count"),\n ... nw.col("a").cum_count(reverse=True).alias("a_cum_count_reverse"),\n ... 
)\n ┌─────────────────────────────────────────┐\n | Narwhals DataFrame |\n |-----------------------------------------|\n | a a_cum_count a_cum_count_reverse|\n |0 x 1 3|\n |1 k 2 2|\n |2 None 2 1|\n |3 d 3 1|\n └─────────────────────────────────────────┘\n """\n return self._with_orderable_window(\n lambda plx: self._to_compliant_expr(plx).cum_count(reverse=reverse)\n )\n\n def cum_min(self, *, reverse: bool = False) -> Self:\n r"""Return the cumulative min of the non-null values in the column.\n\n Info:\n For lazy backends, this operation must be followed by `Expr.over` with\n `order_by` specified, see [order-dependence](../concepts/order_dependence.md).\n\n Arguments:\n reverse: reverse the operation\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [3, 1, None, 2]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... nw.col("a").cum_min().alias("a_cum_min"),\n ... nw.col("a").cum_min(reverse=True).alias("a_cum_min_reverse"),\n ... )\n ┌────────────────────────────────────┐\n | Narwhals DataFrame |\n |------------------------------------|\n | a a_cum_min a_cum_min_reverse|\n |0 3.0 3.0 1.0|\n |1 1.0 1.0 1.0|\n |2 NaN NaN NaN|\n |3 2.0 1.0 2.0|\n └────────────────────────────────────┘\n """\n return self._with_orderable_window(\n lambda plx: self._to_compliant_expr(plx).cum_min(reverse=reverse)\n )\n\n def cum_max(self, *, reverse: bool = False) -> Self:\n r"""Return the cumulative max of the non-null values in the column.\n\n Info:\n For lazy backends, this operation must be followed by `Expr.over` with\n `order_by` specified, see [order-dependence](../concepts/order_dependence.md).\n\n Arguments:\n reverse: reverse the operation\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 3, None, 2]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... nw.col("a").cum_max().alias("a_cum_max"),\n ... nw.col("a").cum_max(reverse=True).alias("a_cum_max_reverse"),\n ... )\n ┌────────────────────────────────────┐\n | Narwhals DataFrame |\n |------------------------------------|\n | a a_cum_max a_cum_max_reverse|\n |0 1.0 1.0 3.0|\n |1 3.0 3.0 3.0|\n |2 NaN NaN NaN|\n |3 2.0 3.0 2.0|\n └────────────────────────────────────┘\n """\n return self._with_orderable_window(\n lambda plx: self._to_compliant_expr(plx).cum_max(reverse=reverse)\n )\n\n def cum_prod(self, *, reverse: bool = False) -> Self:\n r"""Return the cumulative product of the non-null values in the column.\n\n Info:\n For lazy backends, this operation must be followed by `Expr.over` with\n `order_by` specified, see [order-dependence](../concepts/order_dependence.md).\n\n Arguments:\n reverse: reverse the operation\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 3, None, 2]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... nw.col("a").cum_prod().alias("a_cum_prod"),\n ... nw.col("a").cum_prod(reverse=True).alias("a_cum_prod_reverse"),\n ... 
)\n ┌──────────────────────────────────────┐\n | Narwhals DataFrame |\n |--------------------------------------|\n | a a_cum_prod a_cum_prod_reverse|\n |0 1.0 1.0 6.0|\n |1 3.0 3.0 6.0|\n |2 NaN NaN NaN|\n |3 2.0 6.0 2.0|\n └──────────────────────────────────────┘\n """\n return self._with_orderable_window(\n lambda plx: self._to_compliant_expr(plx).cum_prod(reverse=reverse)\n )\n\n def rolling_sum(\n self, window_size: int, *, min_samples: int | None = None, center: bool = False\n ) -> Self:\n """Apply a rolling sum (moving sum) over the values.\n\n A window of length `window_size` will traverse the values. The resulting values\n will be aggregated to their sum.\n\n The window at a given row will include the row itself and the `window_size - 1`\n elements before it.\n\n Info:\n For lazy backends, this operation must be followed by `Expr.over` with\n `order_by` specified, see [order-dependence](../concepts/order_dependence.md).\n\n Arguments:\n window_size: The length of the window in number of elements. It must be a\n strictly positive integer.\n min_samples: The number of values in the window that should be non-null before\n computing a result. If set to `None` (default), it will be set equal to\n `window_size`. If provided, it must be a strictly positive integer, and\n less than or equal to `window_size`\n center: Set the labels at the center of the window.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1.0, 2.0, None, 4.0]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... a_rolling_sum=nw.col("a").rolling_sum(window_size=3, min_samples=1)\n ... )\n ┌─────────────────────┐\n | Narwhals DataFrame |\n |---------------------|\n | a a_rolling_sum|\n |0 1.0 1.0|\n |1 2.0 3.0|\n |2 NaN 3.0|\n |3 4.0 6.0|\n └─────────────────────┘\n """\n window_size, min_samples_int = _validate_rolling_arguments(\n window_size=window_size, min_samples=min_samples\n )\n\n return self._with_orderable_window(\n lambda plx: self._to_compliant_expr(plx).rolling_sum(\n window_size=window_size, min_samples=min_samples_int, center=center\n )\n )\n\n def rolling_mean(\n self, window_size: int, *, min_samples: int | None = None, center: bool = False\n ) -> Self:\n """Apply a rolling mean (moving mean) over the values.\n\n A window of length `window_size` will traverse the values. The resulting values\n will be aggregated to their mean.\n\n The window at a given row will include the row itself and the `window_size - 1`\n elements before it.\n\n Info:\n For lazy backends, this operation must be followed by `Expr.over` with\n `order_by` specified, see [order-dependence](../concepts/order_dependence.md).\n\n Arguments:\n window_size: The length of the window in number of elements. It must be a\n strictly positive integer.\n min_samples: The number of values in the window that should be non-null before\n computing a result. If set to `None` (default), it will be set equal to\n `window_size`. If provided, it must be a strictly positive integer, and\n less than or equal to `window_size`\n center: Set the labels at the center of the window.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1.0, 2.0, None, 4.0]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... a_rolling_mean=nw.col("a").rolling_mean(window_size=3, min_samples=1)\n ... 
)\n ┌──────────────────────┐\n | Narwhals DataFrame |\n |----------------------|\n | a a_rolling_mean|\n |0 1.0 1.0|\n |1 2.0 1.5|\n |2 NaN 1.5|\n |3 4.0 3.0|\n └──────────────────────┘\n """\n window_size, min_samples = _validate_rolling_arguments(\n window_size=window_size, min_samples=min_samples\n )\n\n return self._with_orderable_window(\n lambda plx: self._to_compliant_expr(plx).rolling_mean(\n window_size=window_size, min_samples=min_samples, center=center\n )\n )\n\n def rolling_var(\n self,\n window_size: int,\n *,\n min_samples: int | None = None,\n center: bool = False,\n ddof: int = 1,\n ) -> Self:\n """Apply a rolling variance (moving variance) over the values.\n\n A window of length `window_size` will traverse the values. The resulting values\n will be aggregated to their variance.\n\n The window at a given row will include the row itself and the `window_size - 1`\n elements before it.\n\n Info:\n For lazy backends, this operation must be followed by `Expr.over` with\n `order_by` specified, see [order-dependence](../concepts/order_dependence.md).\n\n Arguments:\n window_size: The length of the window in number of elements. It must be a\n strictly positive integer.\n min_samples: The number of values in the window that should be non-null before\n computing a result. If set to `None` (default), it will be set equal to\n `window_size`. If provided, it must be a strictly positive integer, and\n less than or equal to `window_size`.\n center: Set the labels at the center of the window.\n ddof: Delta Degrees of Freedom; the divisor for a length N window is N - ddof.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1.0, 2.0, None, 4.0]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... a_rolling_var=nw.col("a").rolling_var(window_size=3, min_samples=1)\n ... )\n ┌─────────────────────┐\n | Narwhals DataFrame |\n |---------------------|\n | a a_rolling_var|\n |0 1.0 NaN|\n |1 2.0 0.5|\n |2 NaN 0.5|\n |3 4.0 2.0|\n └─────────────────────┘\n """\n window_size, min_samples = _validate_rolling_arguments(\n window_size=window_size, min_samples=min_samples\n )\n\n return self._with_orderable_window(\n lambda plx: self._to_compliant_expr(plx).rolling_var(\n window_size=window_size, min_samples=min_samples, center=center, ddof=ddof\n )\n )\n\n def rolling_std(\n self,\n window_size: int,\n *,\n min_samples: int | None = None,\n center: bool = False,\n ddof: int = 1,\n ) -> Self:\n """Apply a rolling standard deviation (moving standard deviation) over the values.\n\n A window of length `window_size` will traverse the values. The resulting values\n will be aggregated to their standard deviation.\n\n The window at a given row will include the row itself and the `window_size - 1`\n elements before it.\n\n Info:\n For lazy backends, this operation must be followed by `Expr.over` with\n `order_by` specified, see [order-dependence](../concepts/order_dependence.md).\n\n Arguments:\n window_size: The length of the window in number of elements. It must be a\n strictly positive integer.\n min_samples: The number of values in the window that should be non-null before\n computing a result. If set to `None` (default), it will be set equal to\n `window_size`. 
If provided, it must be a strictly positive integer, and\n less than or equal to `window_size`.\n center: Set the labels at the center of the window.\n ddof: Delta Degrees of Freedom; the divisor for a length N window is N - ddof.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1.0, 2.0, None, 4.0]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... a_rolling_std=nw.col("a").rolling_std(window_size=3, min_samples=1)\n ... )\n ┌─────────────────────┐\n | Narwhals DataFrame |\n |---------------------|\n | a a_rolling_std|\n |0 1.0 NaN|\n |1 2.0 0.707107|\n |2 NaN 0.707107|\n |3 4.0 1.414214|\n └─────────────────────┘\n """\n window_size, min_samples = _validate_rolling_arguments(\n window_size=window_size, min_samples=min_samples\n )\n\n return self._with_orderable_window(\n lambda plx: self._to_compliant_expr(plx).rolling_std(\n window_size=window_size, min_samples=min_samples, center=center, ddof=ddof\n )\n )\n\n def rank(self, method: RankMethod = "average", *, descending: bool = False) -> Self:\n """Assign ranks to data, dealing with ties appropriately.\n\n Notes:\n The resulting dtype may differ between backends.\n\n Info:\n For lazy backends, this operation must be followed by `Expr.over` with\n `order_by` specified, see [order-dependence](../concepts/order_dependence.md).\n\n Arguments:\n method: The method used to assign ranks to tied elements.\n The following methods are available (default is 'average')\n\n - *"average"*: The average of the ranks that would have been assigned to\n all the tied values is assigned to each value.\n - *"min"*: The minimum of the ranks that would have been assigned to all\n the tied values is assigned to each value. (This is also referred to\n as "competition" ranking.)\n - *"max"*: The maximum of the ranks that would have been assigned to all\n the tied values is assigned to each value.\n - *"dense"*: Like "min", but the rank of the next highest element is\n assigned the rank immediately after those assigned to the tied elements.\n - *"ordinal"*: All values are given a distinct rank, corresponding to the\n order that the values occur in the Series.\n\n descending: Rank in descending order.\n\n Returns:\n A new expression with rank data.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [3, 6, 1, 1, 6]})\n >>> df = nw.from_native(df_native)\n >>> result = df.with_columns(rank=nw.col("a").rank(method="dense"))\n >>> result\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a rank |\n | 0 3 2.0 |\n | 1 6 3.0 |\n | 2 1 1.0 |\n | 3 1 1.0 |\n | 4 6 3.0 |\n └──────────────────┘\n """\n supported_rank_methods = {"average", "min", "max", "dense", "ordinal"}\n if method not in supported_rank_methods:\n msg = (\n "Ranking method must be one of {'average', 'min', 'max', 'dense', 'ordinal'}. "\n f"Found '{method}'"\n )\n raise ValueError(msg)\n\n return self._with_unorderable_window(\n lambda plx: self._to_compliant_expr(plx).rank(\n method=method, descending=descending\n )\n )\n\n def log(self, base: float = math.e) -> Self:\n r"""Compute the logarithm to a given base.\n\n Arguments:\n base: Given base, defaults to `e`\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> df_native = pa.table({"values": [1, 2, 4]})\n >>> df = nw.from_native(df_native)\n >>> result = df.with_columns(\n ... 
log=nw.col("values").log(), log_2=nw.col("values").log(base=2)\n ... )\n >>> result\n ┌────────────────────────────────────────────────┐\n | Narwhals DataFrame |\n |------------------------------------------------|\n |pyarrow.Table |\n |values: int64 |\n |log: double |\n |log_2: double |\n |---- |\n |values: [[1,2,4]] |\n |log: [[0,0.6931471805599453,1.3862943611198906]]|\n |log_2: [[0,1,2]] |\n └────────────────────────────────────────────────┘\n """\n return self._with_elementwise_op(\n lambda plx: self._to_compliant_expr(plx).log(base=base)\n )\n\n def exp(self) -> Self:\n r"""Compute the exponent.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> df_native = pa.table({"values": [-1, 0, 1]})\n >>> df = nw.from_native(df_native)\n >>> result = df.with_columns(exp=nw.col("values").exp())\n >>> result\n ┌────────────────────────────────────────────────┐\n | Narwhals DataFrame |\n |------------------------------------------------|\n |pyarrow.Table |\n |values: int64 |\n |exp: double |\n |---- |\n |values: [[-1,0,1]] |\n |exp: [[0.36787944117144233,1,2.718281828459045]]|\n └────────────────────────────────────────────────┘\n """\n return self._with_elementwise_op(lambda plx: self._to_compliant_expr(plx).exp())\n\n def sqrt(self) -> Self:\n r"""Compute the square root.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> df_native = pa.table({"values": [1, 4, 9]})\n >>> df = nw.from_native(df_native)\n >>> result = df.with_columns(sqrt=nw.col("values").sqrt())\n >>> result\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n |pyarrow.Table |\n |values: int64 |\n |sqrt: double |\n |---- |\n |values: [[1,4,9]] |\n |sqrt: [[1,2,3]] |\n └──────────────────┘\n """\n return self._with_elementwise_op(lambda plx: self._to_compliant_expr(plx).sqrt())\n\n @property\n def str(self) -> ExprStringNamespace[Self]:\n return ExprStringNamespace(self)\n\n @property\n def dt(self) -> ExprDateTimeNamespace[Self]:\n return ExprDateTimeNamespace(self)\n\n @property\n def cat(self) -> ExprCatNamespace[Self]:\n return ExprCatNamespace(self)\n\n @property\n def name(self) -> ExprNameNamespace[Self]:\n return ExprNameNamespace(self)\n\n @property\n def list(self) -> ExprListNamespace[Self]:\n return ExprListNamespace(self)\n\n @property\n def struct(self) -> ExprStructNamespace[Self]:\n return ExprStructNamespace(self)\n\n\n__all__ = ["Expr"]\n
.venv\Lib\site-packages\narwhals\expr.py
expr.py
Python
107,051
0.75
0.068926
0.008881
vue-tools
512
2025-06-06T02:11:53.348673
MIT
false
289d18cfa04dad402d0e565fc196659c
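A minimal usage sketch of the window and rolling APIs documented in the expr.py row above, assuming only that pandas and narwhals are installed; the frame, column names, and window size are illustrative and not part of the original file.

import pandas as pd
import narwhals as nw

# Partitioned aggregation via `over`, plus a rolling mean; with the eager
# pandas backend no trailing `.over(order_by=...)` is required.
df = nw.from_native(pd.DataFrame({"g": ["x", "x", "y"], "a": [1.0, 2.0, 4.0]}))
result = df.with_columns(
    a_min_per_group=nw.col("a").min().over("g"),
    a_rolling_mean=nw.col("a").rolling_mean(window_size=2, min_samples=1),
)
print(result.to_native())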
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Generic, TypeVar\n\nif TYPE_CHECKING:\n from narwhals.expr import Expr\n\nExprT = TypeVar("ExprT", bound="Expr")\n\n\nclass ExprCatNamespace(Generic[ExprT]):\n def __init__(self, expr: ExprT) -> None:\n self._expr = expr\n\n def get_categories(self) -> ExprT:\n """Get unique categories from column.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame(\n ... {"fruits": ["apple", "mango", "mango"]},\n ... schema={"fruits": pl.Categorical},\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("fruits").cat.get_categories()).to_native()\n shape: (2, 1)\n ┌────────┐\n │ fruits │\n │ --- │\n │ str │\n ╞════════╡\n │ apple │\n │ mango │\n └────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).cat.get_categories()\n )\n
.venv\Lib\site-packages\narwhals\expr_cat.py
expr_cat.py
Python
1,261
0.85
0.095238
0
node-utils
199
2025-03-14T11:31:13.384373
Apache-2.0
false
60d287ce99bf4f1338d92f8c0cc4c888
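A short sketch of the categorical namespace from the expr_cat.py row above, mirroring its docstring example; assumes polars and narwhals are installed.

import polars as pl
import narwhals as nw

# `cat.get_categories` returns the unique categories of a Categorical column.
df = nw.from_native(
    pl.DataFrame(
        {"fruits": ["apple", "mango", "mango"]}, schema={"fruits": pl.Categorical}
    )
)
print(df.select(nw.col("fruits").cat.get_categories()).to_native())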
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Generic, TypeVar\n\nif TYPE_CHECKING:\n from narwhals.expr import Expr\n from narwhals.typing import TimeUnit\n\nExprT = TypeVar("ExprT", bound="Expr")\n\n\nclass ExprDateTimeNamespace(Generic[ExprT]):\n def __init__(self, expr: ExprT) -> None:\n self._expr = expr\n\n def date(self) -> ExprT:\n """Extract the date from underlying DateTime representation.\n\n Returns:\n A new expression.\n\n Raises:\n NotImplementedError: If pandas default backend is being used.\n\n Examples:\n >>> from datetime import datetime\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame(\n ... {"a": [datetime(2012, 1, 7, 10), datetime(2027, 12, 13)]}\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a").dt.date()).to_native()\n shape: (2, 1)\n ┌────────────┐\n │ a │\n │ --- │\n │ date │\n ╞════════════╡\n │ 2012-01-07 │\n │ 2027-12-13 │\n └────────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.date()\n )\n\n def year(self) -> ExprT:\n """Extract year from underlying DateTime representation.\n\n Returns the year number in the calendar date.\n\n Returns:\n A new expression.\n\n Examples:\n >>> from datetime import datetime\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame(\n ... {"a": [datetime(1978, 6, 1), datetime(2065, 1, 1)]}\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(nw.col("a").dt.year().alias("year"))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a year|\n |0 1978-06-01 1978|\n |1 2065-01-01 2065|\n └──────────────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.year()\n )\n\n def month(self) -> ExprT:\n """Extract month from underlying DateTime representation.\n\n Returns the month number starting from 1. The return value ranges from 1 to 12.\n\n Returns:\n A new expression.\n\n Examples:\n >>> from datetime import datetime\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> df_native = pa.table({"a": [datetime(1978, 6, 1), datetime(2065, 1, 1)]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(nw.col("a").dt.month().alias("month")).to_native()\n pyarrow.Table\n a: timestamp[us]\n month: int64\n ----\n a: [[1978-06-01 00:00:00.000000,2065-01-01 00:00:00.000000]]\n month: [[6,1]]\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.month()\n )\n\n def day(self) -> ExprT:\n """Extract day from underlying DateTime representation.\n\n Returns the day of month starting from 1. The return value ranges from 1 to 31. 
(The last day of month differs by months.)\n\n Returns:\n A new expression.\n\n Examples:\n >>> from datetime import datetime\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> df_native = pa.table({"a": [datetime(1978, 6, 1), datetime(2065, 1, 1)]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(nw.col("a").dt.day().alias("day")).to_native()\n pyarrow.Table\n a: timestamp[us]\n day: int64\n ----\n a: [[1978-06-01 00:00:00.000000,2065-01-01 00:00:00.000000]]\n day: [[1,1]]\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.day()\n )\n\n def hour(self) -> ExprT:\n """Extract hour from underlying DateTime representation.\n\n Returns the hour number from 0 to 23.\n\n Returns:\n A new expression.\n\n Examples:\n >>> from datetime import datetime\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame(\n ... {"a": [datetime(1978, 1, 1, 1), datetime(2065, 1, 1, 10)]}\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(nw.col("a").dt.hour().alias("hour"))\n ┌──────────────────────────────┐\n | Narwhals DataFrame |\n |------------------------------|\n |shape: (2, 2) |\n |┌─────────────────────┬──────┐|\n |│ a ┆ hour │|\n |│ --- ┆ --- │|\n |│ datetime[μs] ┆ i8 │|\n |╞═════════════════════╪══════╡|\n |│ 1978-01-01 01:00:00 ┆ 1 │|\n |│ 2065-01-01 10:00:00 ┆ 10 │|\n |└─────────────────────┴──────┘|\n └──────────────────────────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.hour()\n )\n\n def minute(self) -> ExprT:\n """Extract minutes from underlying DateTime representation.\n\n Returns the minute number from 0 to 59.\n\n Returns:\n A new expression.\n\n Examples:\n >>> from datetime import datetime\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame(\n ... {"a": [datetime(1978, 1, 1, 1, 1), datetime(2065, 1, 1, 10, 20)]}\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(nw.col("a").dt.minute().alias("minute")).to_native()\n a minute\n 0 1978-01-01 01:01:00 1\n 1 2065-01-01 10:20:00 20\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.minute()\n )\n\n def second(self) -> ExprT:\n """Extract seconds from underlying DateTime representation.\n\n Returns:\n A new expression.\n\n Examples:\n >>> from datetime import datetime\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> df_native = pa.table(\n ... {\n ... "a": [\n ... datetime(1978, 1, 1, 1, 1, 1),\n ... datetime(2065, 1, 1, 10, 20, 30),\n ... ]\n ... }\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(nw.col("a").dt.second().alias("second")).to_native()\n pyarrow.Table\n a: timestamp[us]\n second: int64\n ----\n a: [[1978-01-01 01:01:01.000000,2065-01-01 10:20:30.000000]]\n second: [[1,30]]\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.second()\n )\n\n def millisecond(self) -> ExprT:\n """Extract milliseconds from underlying DateTime representation.\n\n Returns:\n A new expression.\n\n Examples:\n >>> from datetime import datetime\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> df_native = pa.table(\n ... {\n ... "a": [\n ... datetime(1978, 1, 1, 1, 1, 1, 0),\n ... datetime(2065, 1, 1, 10, 20, 30, 67000),\n ... ]\n ... }\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... nw.col("a").dt.millisecond().alias("millisecond")\n ... 
).to_native()\n pyarrow.Table\n a: timestamp[us]\n millisecond: int64\n ----\n a: [[1978-01-01 01:01:01.000000,2065-01-01 10:20:30.067000]]\n millisecond: [[0,67]]\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.millisecond()\n )\n\n def microsecond(self) -> ExprT:\n """Extract microseconds from underlying DateTime representation.\n\n Returns:\n A new expression.\n\n Examples:\n >>> from datetime import datetime\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> df_native = pa.table(\n ... {\n ... "a": [\n ... datetime(1978, 1, 1, 1, 1, 1, 0),\n ... datetime(2065, 1, 1, 10, 20, 30, 67000),\n ... ]\n ... }\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... nw.col("a").dt.microsecond().alias("microsecond")\n ... ).to_native()\n pyarrow.Table\n a: timestamp[us]\n microsecond: int64\n ----\n a: [[1978-01-01 01:01:01.000000,2065-01-01 10:20:30.067000]]\n microsecond: [[0,67000]]\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.microsecond()\n )\n\n def nanosecond(self) -> ExprT:\n """Extract Nanoseconds from underlying DateTime representation.\n\n Returns:\n A new expression.\n\n Examples:\n >>> from datetime import datetime\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> df_native = pa.table(\n ... {\n ... "a": [\n ... datetime(1978, 1, 1, 1, 1, 1, 0),\n ... datetime(2065, 1, 1, 10, 20, 30, 67000),\n ... ]\n ... }\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... nw.col("a").dt.nanosecond().alias("nanosecond")\n ... ).to_native()\n pyarrow.Table\n a: timestamp[us]\n nanosecond: int64\n ----\n a: [[1978-01-01 01:01:01.000000,2065-01-01 10:20:30.067000]]\n nanosecond: [[0,67000000]]\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.nanosecond()\n )\n\n def ordinal_day(self) -> ExprT:\n """Get ordinal day.\n\n Returns:\n A new expression.\n\n Examples:\n >>> from datetime import datetime\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame(\n ... {"a": [datetime(2020, 1, 1), datetime(2020, 8, 3)]}\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(a_ordinal_day=nw.col("a").dt.ordinal_day())\n ┌───────────────────────────┐\n | Narwhals DataFrame |\n |---------------------------|\n | a a_ordinal_day|\n |0 2020-01-01 1|\n |1 2020-08-03 216|\n └───────────────────────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.ordinal_day()\n )\n\n def weekday(self) -> ExprT:\n """Extract the week day from the underlying Date representation.\n\n Returns:\n Returns the ISO weekday number where monday = 1 and sunday = 7\n\n Examples:\n >>> from datetime import datetime\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame(\n ... {"a": [datetime(2020, 1, 1), datetime(2020, 8, 3)]}\n ... 
)\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(a_week_day=nw.col("a").dt.weekday())\n ┌────────────────────────┐\n | Narwhals DataFrame |\n |------------------------|\n | a a_week_day|\n |0 2020-01-01 3|\n |1 2020-08-03 1|\n └────────────────────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.weekday()\n )\n\n def total_minutes(self) -> ExprT:\n """Get total minutes.\n\n Returns:\n A new expression.\n\n Notes:\n The function outputs the total minutes in the int dtype by default,\n however, pandas may change the dtype to float when there are missing values,\n consider using `fill_null()` and `cast` in this case.\n\n Examples:\n >>> from datetime import timedelta\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame(\n ... {"a": [timedelta(minutes=10), timedelta(minutes=20, seconds=40)]}\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... a_total_minutes=nw.col("a").dt.total_minutes()\n ... ).to_native()\n shape: (2, 2)\n ┌──────────────┬─────────────────┐\n │ a ┆ a_total_minutes │\n │ --- ┆ --- │\n │ duration[μs] ┆ i64 │\n ╞══════════════╪═════════════════╡\n │ 10m ┆ 10 │\n │ 20m 40s ┆ 20 │\n └──────────────┴─────────────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.total_minutes()\n )\n\n def total_seconds(self) -> ExprT:\n """Get total seconds.\n\n Returns:\n A new expression.\n\n Notes:\n The function outputs the total seconds in the int dtype by default,\n however, pandas may change the dtype to float when there are missing values,\n consider using `fill_null()` and `cast` in this case.\n\n Examples:\n >>> from datetime import timedelta\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame(\n ... {"a": [timedelta(seconds=10), timedelta(seconds=20, milliseconds=40)]}\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... a_total_seconds=nw.col("a").dt.total_seconds()\n ... ).to_native()\n shape: (2, 2)\n ┌──────────────┬─────────────────┐\n │ a ┆ a_total_seconds │\n │ --- ┆ --- │\n │ duration[μs] ┆ i64 │\n ╞══════════════╪═════════════════╡\n │ 10s ┆ 10 │\n │ 20s 40ms ┆ 20 │\n └──────────────┴─────────────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.total_seconds()\n )\n\n def total_milliseconds(self) -> ExprT:\n """Get total milliseconds.\n\n Returns:\n A new expression.\n\n Notes:\n The function outputs the total milliseconds in the int dtype by default,\n however, pandas may change the dtype to float when there are missing values,\n consider using `fill_null()` and `cast` in this case.\n\n Examples:\n >>> from datetime import timedelta\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame(\n ... {\n ... "a": [\n ... timedelta(milliseconds=10),\n ... timedelta(milliseconds=20, microseconds=40),\n ... ]\n ... }\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... a_total_milliseconds=nw.col("a").dt.total_milliseconds()\n ... 
).to_native()\n shape: (2, 2)\n ┌──────────────┬──────────────────────┐\n │ a ┆ a_total_milliseconds │\n │ --- ┆ --- │\n │ duration[μs] ┆ i64 │\n ╞══════════════╪══════════════════════╡\n │ 10ms ┆ 10 │\n │ 20040µs ┆ 20 │\n └──────────────┴──────────────────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.total_milliseconds()\n )\n\n def total_microseconds(self) -> ExprT:\n """Get total microseconds.\n\n Returns:\n A new expression.\n\n Notes:\n The function outputs the total microseconds in the int dtype by default,\n however, pandas may change the dtype to float when there are missing values,\n consider using `fill_null()` and `cast` in this case.\n\n Examples:\n >>> from datetime import timedelta\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> df_native = pa.table(\n ... {\n ... "a": [\n ... timedelta(microseconds=10),\n ... timedelta(milliseconds=1, microseconds=200),\n ... ]\n ... }\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... a_total_microseconds=nw.col("a").dt.total_microseconds()\n ... ).to_native()\n pyarrow.Table\n a: duration[us]\n a_total_microseconds: int64\n ----\n a: [[10,1200]]\n a_total_microseconds: [[10,1200]]\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.total_microseconds()\n )\n\n def total_nanoseconds(self) -> ExprT:\n """Get total nanoseconds.\n\n Returns:\n A new expression.\n\n Notes:\n The function outputs the total nanoseconds in the int dtype by default,\n however, pandas may change the dtype to float when there are missing values,\n consider using `fill_null()` and `cast` in this case.\n\n Examples:\n >>> from datetime import timedelta\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame(\n ... {\n ... "a": pd.to_datetime(\n ... [\n ... "2024-01-01 00:00:00.000000001",\n ... "2024-01-01 00:00:00.000000002",\n ... ]\n ... )\n ... }\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... a_diff_total_nanoseconds=nw.col("a").diff().dt.total_nanoseconds()\n ... 
).to_native()\n a a_diff_total_nanoseconds\n 0 2024-01-01 00:00:00.000000001 NaN\n 1 2024-01-01 00:00:00.000000002 1.0\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.total_nanoseconds()\n )\n\n def to_string(self, format: str) -> ExprT:\n """Convert a Date/Time/Datetime column into a String column with the given format.\n\n Arguments:\n format: Format to format temporal column with.\n\n Returns:\n A new expression.\n\n Notes:\n Unfortunately, different libraries interpret format directives a bit\n differently.\n\n - Chrono, the library used by Polars, uses `"%.f"` for fractional seconds,\n whereas pandas and Python stdlib use `".%f"`.\n - PyArrow interprets `"%S"` as "seconds, including fractional seconds"\n whereas most other tools interpret it as "just seconds, as 2 digits".\n ---\n Therefore, we make the following adjustments.\n\n - for pandas-like libraries, we replace `"%S.%f"` with `"%S%.f"`.\n - for PyArrow, we replace `"%S.%f"` with `"%S"`.\n ---\n Workarounds like these don't make us happy, and we try to avoid them as\n much as possible, but here we feel like it's the best compromise.\n\n If you just want to format a date/datetime Series as a local datetime\n string, and have it work as consistently as possible across libraries,\n we suggest using:\n\n - `"%Y-%m-%dT%H:%M:%S%.f"` for datetimes\n - `"%Y-%m-%d"` for dates\n ---\n Though note that, even then, different tools may return a different number\n of trailing zeros. Nonetheless, this is probably consistent enough for\n most applications.\n\n If you have an application where this is not enough, please open an issue\n and let us know.\n\n Examples:\n >>> from datetime import datetime\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame(\n ... {"a": [datetime(2020, 3, 1), datetime(2020, 5, 1)]}\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a").dt.to_string("%Y/%m/%d %H:%M:%S"))\n ┌───────────────────────┐\n | Narwhals DataFrame |\n |-----------------------|\n |shape: (2, 1) |\n |┌─────────────────────┐|\n |│ a │|\n |│ --- │|\n |│ str │|\n |╞═════════════════════╡|\n |│ 2020/03/01 00:00:00 │|\n |│ 2020/05/01 00:00:00 │|\n |└─────────────────────┘|\n └───────────────────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.to_string(format)\n )\n\n def replace_time_zone(self, time_zone: str | None) -> ExprT:\n """Replace time zone.\n\n Arguments:\n time_zone: Target time zone.\n\n Returns:\n A new expression.\n\n Examples:\n >>> from datetime import datetime, timezone\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame(\n ... {\n ... "a": [\n ... datetime(2024, 1, 1, tzinfo=timezone.utc),\n ... datetime(2024, 1, 2, tzinfo=timezone.utc),\n ... ]\n ... }\n ... 
)\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a").dt.replace_time_zone("Asia/Kathmandu")).to_native()\n a\n 0 2024-01-01 00:00:00+05:45\n 1 2024-01-02 00:00:00+05:45\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.replace_time_zone(time_zone)\n )\n\n def convert_time_zone(self, time_zone: str) -> ExprT:\n """Convert to a new time zone.\n\n If converting from a time-zone-naive column, then conversion happens\n as if converting from UTC.\n\n Arguments:\n time_zone: Target time zone.\n\n Returns:\n A new expression.\n\n Examples:\n >>> from datetime import datetime, timezone\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame(\n ... {\n ... "a": [\n ... datetime(2024, 1, 1, tzinfo=timezone.utc),\n ... datetime(2024, 1, 2, tzinfo=timezone.utc),\n ... ]\n ... }\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("a").dt.convert_time_zone("Asia/Kathmandu")).to_native()\n a\n 0 2024-01-01 05:45:00+05:45\n 1 2024-01-02 05:45:00+05:45\n """\n if time_zone is None:\n msg = "Target `time_zone` cannot be `None` in `convert_time_zone`. Please use `replace_time_zone(None)` if you want to remove the time zone."\n raise TypeError(msg)\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.convert_time_zone(time_zone)\n )\n\n def timestamp(self, time_unit: TimeUnit = "us") -> ExprT:\n """Return a timestamp in the given time unit.\n\n Arguments:\n time_unit: One of\n - 'ns': nanosecond.\n - 'us': microsecond.\n - 'ms': millisecond.\n\n Returns:\n A new expression.\n\n Examples:\n >>> from datetime import date\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"date": [date(2001, 1, 1), None]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(nw.col("date").dt.timestamp("ms").alias("timestamp_ms"))\n ┌─────────────────────────────┐\n | Narwhals DataFrame |\n |-----------------------------|\n |shape: (2, 2) |\n |┌────────────┬──────────────┐|\n |│ date ┆ timestamp_ms │|\n |│ --- ┆ --- │|\n |│ date ┆ i64 │|\n |╞════════════╪══════════════╡|\n |│ 2001-01-01 ┆ 978307200000 │|\n |│ null ┆ null │|\n |└────────────┴──────────────┘|\n └─────────────────────────────┘\n """\n if time_unit not in {"ns", "us", "ms"}:\n msg = (\n "invalid `time_unit`"\n f"\n\nExpected one of {{'ns', 'us', 'ms'}}, got {time_unit!r}."\n )\n raise ValueError(msg)\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.timestamp(time_unit)\n )\n\n def truncate(self, every: str) -> ExprT:\n """Divide the date/datetime range into buckets.\n\n Arguments:\n every: Length of bucket. Must be of form `<multiple><unit>`,\n where `multiple` is a positive integer and `unit` is one of\n\n - 'ns': nanosecond.\n - 'us': microsecond.\n - 'ms': millisecond.\n - 's': second.\n - 'm': minute.\n - 'h': hour.\n - 'd': day.\n - 'mo': month.\n - 'q': quarter.\n - 'y': year.\n\n Returns:\n Expression of data type `Date` or `Datetime`.\n\n Examples:\n >>> from datetime import datetime\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"datetime": [datetime(2021, 3, 1, 12, 34)]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(\n ... nw.col("datetime").dt.truncate("1h").alias("datetime_trunc")\n ... 
)\n ┌─────────────────────────────────────────────┐\n | Narwhals DataFrame |\n |---------------------------------------------|\n |shape: (1, 2) |\n |┌─────────────────────┬─────────────────────┐|\n |│ datetime ┆ datetime_trunc │|\n |│ --- ┆ --- │|\n |│ datetime[μs] ┆ datetime[μs] │|\n |╞═════════════════════╪═════════════════════╡|\n |│ 2021-03-01 12:34:00 ┆ 2021-03-01 12:00:00 │|\n |└─────────────────────┴─────────────────────┘|\n └─────────────────────────────────────────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).dt.truncate(every)\n )\n
.venv\Lib\site-packages\narwhals\expr_dt.py
expr_dt.py
Python
31,229
0.85
0.052296
0
python-kit
400
2025-05-10T09:12:12.383593
Apache-2.0
false
1464c5932c8a8f298d39fc82d8506fdb
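A brief sketch combining several `dt` accessors from the expr_dt.py row above; assumes pandas and narwhals, with illustrative data. It follows that file's advice to prefer "%Y-%m-%d" when formatting dates.

from datetime import datetime

import pandas as pd
import narwhals as nw

# Extract calendar fields and render a string label from a datetime column.
df = nw.from_native(pd.DataFrame({"a": [datetime(2020, 1, 1), datetime(2020, 8, 3)]}))
result = df.with_columns(
    year=nw.col("a").dt.year(),
    ordinal_day=nw.col("a").dt.ordinal_day(),
    label=nw.col("a").dt.to_string("%Y-%m-%d"),
)
print(result.to_native())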
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Generic, TypeVar\n\nif TYPE_CHECKING:\n from narwhals.expr import Expr\n\nExprT = TypeVar("ExprT", bound="Expr")\n\n\nclass ExprListNamespace(Generic[ExprT]):\n def __init__(self, expr: ExprT) -> None:\n self._expr = expr\n\n def len(self) -> ExprT:\n """Return the number of elements in each list.\n\n Null values count towards the total.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"a": [[1, 2], [3, 4, None], None, []]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(a_len=nw.col("a").list.len())\n ┌────────────────────────┐\n | Narwhals DataFrame |\n |------------------------|\n |shape: (4, 2) |\n |┌──────────────┬───────┐|\n |│ a ┆ a_len │|\n |│ --- ┆ --- │|\n |│ list[i64] ┆ u32 │|\n |╞══════════════╪═══════╡|\n |│ [1, 2] ┆ 2 │|\n |│ [3, 4, null] ┆ 3 │|\n |│ null ┆ null │|\n |│ [] ┆ 0 │|\n |└──────────────┴───────┘|\n └────────────────────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).list.len()\n )\n
.venv\Lib\site-packages\narwhals\expr_list.py
expr_list.py
Python
1,772
0.85
0.085106
0
react-lib
200
2024-08-03T05:45:41.111058
Apache-2.0
false
bf014eda6ba59f1fe13b0dfdb00dd7ac
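A small sketch of `list.len` from the expr_list.py row above; assumes polars and narwhals. The filter step is an illustrative combination, not taken from the original file.

import polars as pl
import narwhals as nw

# Per-row list lengths; the null row yields a null length and is dropped by `filter`.
df = nw.from_native(pl.DataFrame({"a": [[1, 2], [3, 4, None], None, []]}))
result = df.with_columns(a_len=nw.col("a").list.len()).filter(
    nw.col("a").list.len() > 1
)
print(result.to_native())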
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Callable, Generic, TypeVar\n\nif TYPE_CHECKING:\n from narwhals.expr import Expr\n\nExprT = TypeVar("ExprT", bound="Expr")\n\n\nclass ExprNameNamespace(Generic[ExprT]):\n def __init__(self, expr: ExprT) -> None:\n self._expr = expr\n\n def keep(self) -> ExprT:\n r"""Keep the original root name of the expression.\n\n Returns:\n A new expression.\n\n Notes:\n This will undo any previous renaming operations on the expression.\n Due to implementation constraints, this method can only be called as the last\n expression in a chain. Only one name operation per expression will work.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"foo": [1, 2], "BAR": [4, 5]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("foo").alias("alias_for_foo").name.keep()).columns\n ['foo']\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).name.keep()\n )\n\n def map(self, function: Callable[[str], str]) -> ExprT:\n r"""Rename the output of an expression by mapping a function over the root name.\n\n Arguments:\n function: Function that maps a root name to a new name.\n\n Returns:\n A new expression.\n\n Notes:\n This will undo any previous renaming operations on the expression.\n Due to implementation constraints, this method can only be called as the last\n expression in a chain. Only one name operation per expression will work.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"foo": [1, 2], "BAR": [4, 5]})\n >>> df = nw.from_native(df_native)\n >>> renaming_func = lambda s: s[::-1] # reverse column name\n >>> df.select(nw.col("foo", "BAR").name.map(renaming_func)).columns\n ['oof', 'RAB']\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).name.map(function)\n )\n\n def prefix(self, prefix: str) -> ExprT:\n r"""Add a prefix to the root column name of the expression.\n\n Arguments:\n prefix: Prefix to add to the root column name.\n\n Returns:\n A new expression.\n\n Notes:\n This will undo any previous renaming operations on the expression.\n Due to implementation constraints, this method can only be called as the last\n expression in a chain. Only one name operation per expression will work.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"foo": [1, 2], "BAR": [4, 5]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("foo", "BAR").name.prefix("with_prefix")).columns\n ['with_prefixfoo', 'with_prefixBAR']\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).name.prefix(prefix)\n )\n\n def suffix(self, suffix: str) -> ExprT:\n r"""Add a suffix to the root column name of the expression.\n\n Arguments:\n suffix: Suffix to add to the root column name.\n\n Returns:\n A new expression.\n\n Notes:\n This will undo any previous renaming operations on the expression.\n Due to implementation constraints, this method can only be called as the last\n expression in a chain. 
Only one name operation per expression will work.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"foo": [1, 2], "BAR": [4, 5]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("foo", "BAR").name.suffix("_with_suffix")).columns\n ['foo_with_suffix', 'BAR_with_suffix']\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).name.suffix(suffix)\n )\n\n def to_lowercase(self) -> ExprT:\n r"""Make the root column name lowercase.\n\n Returns:\n A new expression.\n\n Notes:\n This will undo any previous renaming operations on the expression.\n Due to implementation constraints, this method can only be called as the last\n expression in a chain. Only one name operation per expression will work.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> df_native = pa.table({"foo": [1, 2], "BAR": [4, 5]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("foo", "BAR").name.to_lowercase()).columns\n ['foo', 'bar']\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).name.to_lowercase()\n )\n\n def to_uppercase(self) -> ExprT:\n r"""Make the root column name uppercase.\n\n Returns:\n A new expression.\n\n Notes:\n This will undo any previous renaming operations on the expression.\n Due to implementation constraints, this method can only be called as the last\n expression in a chain. Only one name operation per expression will work.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> df_native = pa.table({"foo": [1, 2], "BAR": [4, 5]})\n >>> df = nw.from_native(df_native)\n >>> df.select(nw.col("foo", "BAR").name.to_uppercase()).columns\n ['FOO', 'BAR']\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).name.to_uppercase()\n )\n
.venv\Lib\site-packages\narwhals\expr_name.py
expr_name.py
Python
6,012
0.95
0.080745
0
python-kit
606
2024-03-08T16:55:18.352946
GPL-3.0
false
1008760c02ccf2aabc870f58a10fe8fb
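A compact sketch of the `name` namespace from the expr_name.py row above; assumes pandas and narwhals. Per the notes in that file, a name operation must be the last call in the expression chain.

import pandas as pd
import narwhals as nw

df = nw.from_native(pd.DataFrame({"foo": [1, 2], "BAR": [4, 5]}))
# Renaming applies to the root column names, undoing any intermediate aliasing.
print(df.select(nw.col("foo", "BAR").name.to_lowercase()).columns)  # ['foo', 'bar']
print(df.select(nw.col("foo").alias("tmp").name.keep()).columns)  # ['foo']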
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Generic, TypeVar\n\nif TYPE_CHECKING:\n from narwhals.expr import Expr\n\nExprT = TypeVar("ExprT", bound="Expr")\n\n\nclass ExprStringNamespace(Generic[ExprT]):\n def __init__(self, expr: ExprT) -> None:\n self._expr = expr\n\n def len_chars(self) -> ExprT:\n r"""Return the length of each string as the number of characters.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"words": ["foo", "345", None]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(words_len=nw.col("words").str.len_chars())\n ┌─────────────────────┐\n | Narwhals DataFrame |\n |---------------------|\n |shape: (3, 2) |\n |┌───────┬───────────┐|\n |│ words ┆ words_len │|\n |│ --- ┆ --- │|\n |│ str ┆ u32 │|\n |╞═══════╪═══════════╡|\n |│ foo ┆ 3 │|\n |│ 345 ┆ 3 │|\n |│ null ┆ null │|\n |└───────┴───────────┘|\n └─────────────────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).str.len_chars()\n )\n\n def replace(\n self, pattern: str, value: str, *, literal: bool = False, n: int = 1\n ) -> ExprT:\n r"""Replace first matching regex/literal substring with a new string value.\n\n Arguments:\n pattern: A valid regular expression pattern.\n value: String that will replace the matched substring.\n literal: Treat `pattern` as a literal string.\n n: Number of matches to replace.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"foo": ["123abc", "abc abc123"]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(replaced=nw.col("foo").str.replace("abc", ""))\n ┌──────────────────────┐\n | Narwhals DataFrame |\n |----------------------|\n | foo replaced|\n |0 123abc 123|\n |1 abc abc123 abc123|\n └──────────────────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).str.replace(\n pattern, value, literal=literal, n=n\n )\n )\n\n def replace_all(self, pattern: str, value: str, *, literal: bool = False) -> ExprT:\n r"""Replace all matching regex/literal substring with a new string value.\n\n Arguments:\n pattern: A valid regular expression pattern.\n value: String that will replace the matched substring.\n literal: Treat `pattern` as a literal string.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"foo": ["123abc", "abc abc123"]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(replaced=nw.col("foo").str.replace_all("abc", ""))\n ┌──────────────────────┐\n | Narwhals DataFrame |\n |----------------------|\n | foo replaced|\n |0 123abc 123|\n |1 abc abc123 123|\n └──────────────────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).str.replace_all(\n pattern, value, literal=literal\n )\n )\n\n def strip_chars(self, characters: str | None = None) -> ExprT:\n r"""Remove leading and trailing characters.\n\n Arguments:\n characters: The set of characters to be removed. 
All combinations of this\n            set of characters will be stripped from the start and end of the string.\n            If set to None (default), all leading and trailing whitespace is removed\n            instead.\n\n        Returns:\n            A new expression.\n\n        Examples:\n            >>> import polars as pl\n            >>> import narwhals as nw\n            >>> df_native = pl.DataFrame({"fruits": ["apple", "\nmango"]})\n            >>> df = nw.from_native(df_native)\n            >>> df.with_columns(stripped=nw.col("fruits").str.strip_chars()).to_dict(\n            ...     as_series=False\n            ... )\n            {'fruits': ['apple', '\nmango'], 'stripped': ['apple', 'mango']}\n        """\n        return self._expr._with_elementwise_op(\n            lambda plx: self._expr._to_compliant_expr(plx).str.strip_chars(characters)\n        )\n\n    def starts_with(self, prefix: str) -> ExprT:\n        r"""Check if string values start with a substring.\n\n        Arguments:\n            prefix: prefix substring\n\n        Returns:\n            A new expression.\n\n        Examples:\n            >>> import pandas as pd\n            >>> import narwhals as nw\n            >>> df_native = pd.DataFrame({"fruits": ["apple", "mango", None]})\n            >>> df = nw.from_native(df_native)\n            >>> df.with_columns(has_prefix=nw.col("fruits").str.starts_with("app"))\n            ┌───────────────────┐\n            |Narwhals DataFrame |\n            |-------------------|\n            |  fruits has_prefix|\n            |0  apple       True|\n            |1  mango      False|\n            |2   None       None|\n            └───────────────────┘\n        """\n        return self._expr._with_elementwise_op(\n            lambda plx: self._expr._to_compliant_expr(plx).str.starts_with(prefix)\n        )\n\n    def ends_with(self, suffix: str) -> ExprT:\n        r"""Check if string values end with a substring.\n\n        Arguments:\n            suffix: suffix substring\n\n        Returns:\n            A new expression.\n\n        Examples:\n            >>> import pandas as pd\n            >>> import narwhals as nw\n            >>> df_native = pd.DataFrame({"fruits": ["apple", "mango", None]})\n            >>> df = nw.from_native(df_native)\n            >>> df.with_columns(has_suffix=nw.col("fruits").str.ends_with("ngo"))\n            ┌───────────────────┐\n            |Narwhals DataFrame |\n            |-------------------|\n            |  fruits has_suffix|\n            |0  apple      False|\n            |1  mango       True|\n            |2   None       None|\n            └───────────────────┘\n        """\n        return self._expr._with_elementwise_op(\n            lambda plx: self._expr._to_compliant_expr(plx).str.ends_with(suffix)\n        )\n\n    def contains(self, pattern: str, *, literal: bool = False) -> ExprT:\n        r"""Check if string contains a substring that matches a pattern.\n\n        Arguments:\n            pattern: A character sequence or valid regular expression pattern.\n            literal: If True, treats the pattern as a literal string.\n                If False, assumes the pattern is a regular expression.\n\n        Returns:\n            A new expression.\n\n        Examples:\n            >>> import pyarrow as pa\n            >>> import narwhals as nw\n            >>> df_native = pa.table({"pets": ["cat", "dog", "rabbit and parrot"]})\n            >>> df = nw.from_native(df_native)\n            >>> df.with_columns(\n            ...     default_match=nw.col("pets").str.contains("cat|parrot"),\n            ...     case_insensitive_match=nw.col("pets").str.contains("cat|(?i)parrot"),\n            ... ).to_native()\n            pyarrow.Table\n            pets: string\n            default_match: bool\n            case_insensitive_match: bool\n            ----\n            pets: [["cat","dog","rabbit and parrot"]]\n            default_match: [[true,false,true]]\n            case_insensitive_match: [[true,false,true]]\n        """\n        return self._expr._with_elementwise_op(\n            lambda plx: self._expr._to_compliant_expr(plx).str.contains(\n                pattern, literal=literal\n            )\n        )\n\n    def slice(self, offset: int, length: int | None = None) -> ExprT:\n        r"""Create subslices of the string values of an expression.\n\n        Arguments:\n            offset: Start index. Negative indexing is supported.\n            length: Length of the slice. 
If set to `None` (default), the slice is taken to the\n                end of the string.\n\n        Returns:\n            A new expression.\n\n        Examples:\n            >>> import pandas as pd\n            >>> import narwhals as nw\n            >>> df_native = pd.DataFrame({"s": ["pear", None, "papaya"]})\n            >>> df = nw.from_native(df_native)\n            >>> df.with_columns(s_sliced=nw.col("s").str.slice(4, length=3))\n            ┌──────────────────┐\n            |Narwhals DataFrame|\n            |------------------|\n            |        s s_sliced|\n            |0    pear         |\n            |1    None     None|\n            |2  papaya       ya|\n            └──────────────────┘\n        """\n        return self._expr._with_elementwise_op(\n            lambda plx: self._expr._to_compliant_expr(plx).str.slice(\n                offset=offset, length=length\n            )\n        )\n\n    def split(self, by: str) -> ExprT:\n        r"""Split the string values of an expression by a substring.\n\n        Arguments:\n            by: Substring to split by.\n\n        Returns:\n            A new expression.\n\n        Examples:\n            >>> import polars as pl\n            >>> import narwhals as nw\n            >>> df_native = pl.DataFrame({"s": ["foo bar", "foo_bar"]})\n            >>> df = nw.from_native(df_native)\n            >>> df.with_columns(nw.col("s").str.split("_").alias("s_split"))\n            ┌────────────────────────────┐\n            |     Narwhals DataFrame     |\n            |----------------------------|\n            |shape: (2, 2)               |\n            |┌─────────┬────────────────┐|\n            |│ s       ┆ s_split        │|\n            |│ ---     ┆ ---            │|\n            |│ str     ┆ list[str]      │|\n            |╞═════════╪════════════════╡|\n            |│ foo bar ┆ ["foo bar"]    │|\n            |│ foo_bar ┆ ["foo", "bar"] │|\n            |└─────────┴────────────────┘|\n            └────────────────────────────┘\n        """\n        return self._expr._with_elementwise_op(\n            lambda plx: self._expr._to_compliant_expr(plx).str.split(by=by)\n        )\n\n    def head(self, n: int = 5) -> ExprT:\n        r"""Take the first n elements of each string.\n\n        Arguments:\n            n: Number of elements to take. Negative indexing is **not** supported.\n\n        Returns:\n            A new expression.\n\n        Notes:\n            If the string has fewer than `n` characters, the full string is returned.\n\n        Examples:\n            >>> import pyarrow as pa\n            >>> import narwhals as nw\n            >>> df_native = pa.table({"lyrics": ["taata", "taatatata", "zukkyun"]})\n            >>> df = nw.from_native(df_native)\n            >>> df.with_columns(lyrics_head=nw.col("lyrics").str.head()).to_native()\n            pyarrow.Table\n            lyrics: string\n            lyrics_head: string\n            ----\n            lyrics: [["taata","taatatata","zukkyun"]]\n            lyrics_head: [["taata","taata","zukky"]]\n        """\n        return self._expr._with_elementwise_op(\n            lambda plx: self._expr._to_compliant_expr(plx).str.slice(0, n)\n        )\n\n    def tail(self, n: int = 5) -> ExprT:\n        r"""Take the last n elements of each string.\n\n        Arguments:\n            n: Number of elements to take. Negative indexing is **not** supported.\n\n        Returns:\n            A new expression.\n\n        Notes:\n            If the string has fewer than `n` characters, the full string is returned.\n\n        Examples:\n            >>> import pyarrow as pa\n            >>> import narwhals as nw\n            >>> df_native = pa.table({"lyrics": ["taata", "taatatata", "zukkyun"]})\n            >>> df = nw.from_native(df_native)\n            >>> df.with_columns(lyrics_tail=nw.col("lyrics").str.tail()).to_native()\n            pyarrow.Table\n            lyrics: string\n            lyrics_tail: string\n            ----\n            lyrics: [["taata","taatatata","zukkyun"]]\n            lyrics_tail: [["taata","atata","kkyun"]]\n        """\n        return self._expr._with_elementwise_op(\n            lambda plx: self._expr._to_compliant_expr(plx).str.slice(\n                offset=-n, length=None\n            )\n        )\n\n    def to_datetime(self, format: str | None = None) -> ExprT:\n        """Convert to Datetime dtype.\n\n        Notes:\n            - pandas defaults to nanosecond time unit, Polars to microsecond.\n                Prior to pandas 2.0, nanoseconds were the only time unit supported\n                in pandas, with no ability to set any other one. 
Support for setting\n                the time unit in pandas, where the version permits, is planned.\n            - timezone-aware strings are all converted to and parsed as UTC.\n\n        Warning:\n            As different backends auto-infer format in different ways, if `format=None`\n            there is no guarantee that the result will be equal.\n\n        Arguments:\n            format: Format to use for conversion. If set to None (default), the format is\n                inferred from the data.\n\n        Returns:\n            A new expression.\n\n        Examples:\n            >>> import polars as pl\n            >>> import narwhals as nw\n            >>> df_native = pl.DataFrame({"a": ["2020-01-01", "2020-01-02"]})\n            >>> df = nw.from_native(df_native)\n            >>> df.select(nw.col("a").str.to_datetime(format="%Y-%m-%d"))\n            ┌───────────────────────┐\n            |  Narwhals DataFrame   |\n            |-----------------------|\n            |shape: (2, 1)          |\n            |┌─────────────────────┐|\n            |│ a                   │|\n            |│ ---                 │|\n            |│ datetime[μs]        │|\n            |╞═════════════════════╡|\n            |│ 2020-01-01 00:00:00 │|\n            |│ 2020-01-02 00:00:00 │|\n            |└─────────────────────┘|\n            └───────────────────────┘\n        """\n        return self._expr._with_elementwise_op(\n            lambda plx: self._expr._to_compliant_expr(plx).str.to_datetime(format=format)\n        )\n\n    def to_date(self, format: str | None = None) -> ExprT:\n        """Convert to date dtype.\n\n        Warning:\n            As different backends auto-infer format in different ways, if `format=None`\n            there is no guarantee that the result will be equal.\n\n        Arguments:\n            format: Format to use for conversion. If set to None (default), the format is inferred from the data.\n\n        Returns:\n            A new expression.\n\n        Examples:\n            >>> import pyarrow as pa\n            >>> import narwhals as nw\n            >>> df_native = pa.table({"a": ["2020-01-01", "2020-01-02"]})\n            >>> df = nw.from_native(df_native)\n            >>> df.select(nw.col("a").str.to_date(format="%Y-%m-%d"))\n            ┌────────────────────────────┐\n            |     Narwhals DataFrame     |\n            |----------------------------|\n            |pyarrow.Table               |\n            |a: date32[day]              |\n            |----                        |\n            |a: [[2020-01-01,2020-01-02]]|\n            └────────────────────────────┘\n        """\n        return self._expr._with_elementwise_op(\n            lambda plx: self._expr._to_compliant_expr(plx).str.to_date(format=format)\n        )\n\n    def to_uppercase(self) -> ExprT:\n        r"""Transform string to uppercase variant.\n\n        Returns:\n            A new expression.\n\n        Notes:\n            The PyArrow backend will convert 'ß' to 'ẞ' instead of 'SS'.\n            For more info see [the related issue](https://github.com/apache/arrow/issues/34599).\n            There may be other unicode-edge-case-related variations across implementations.\n\n        Examples:\n            >>> import pandas as pd\n            >>> import narwhals as nw\n            >>> df_native = pd.DataFrame({"fruits": ["apple", None]})\n            >>> df = nw.from_native(df_native)\n            >>> df.with_columns(upper_col=nw.col("fruits").str.to_uppercase())\n            ┌──────────────────┐\n            |Narwhals DataFrame|\n            |------------------|\n            |  fruits upper_col|\n            |0  apple     APPLE|\n            |1   None      None|\n            └──────────────────┘\n        """\n        return self._expr._with_elementwise_op(\n            lambda plx: self._expr._to_compliant_expr(plx).str.to_uppercase()\n        )\n\n    def to_lowercase(self) -> ExprT:\n        r"""Transform string to lowercase variant.\n\n        Returns:\n            A new expression.\n\n        Examples:\n            >>> import pandas as pd\n            >>> import narwhals as nw\n            >>> df_native = pd.DataFrame({"fruits": ["APPLE", None]})\n            >>> df = nw.from_native(df_native)\n            >>> df.with_columns(lower_col=nw.col("fruits").str.to_lowercase())\n            ┌──────────────────┐\n            |Narwhals DataFrame|\n            |------------------|\n            |  fruits lower_col|\n            |0  APPLE     apple|\n            |1   None      None|\n            └──────────────────┘\n        """\n        return self._expr._with_elementwise_op(\n            lambda plx: self._expr._to_compliant_expr(plx).str.to_lowercase()\n        
)\n\n def zfill(self, width: int) -> ExprT:\n """Transform string to zero-padded variant.\n\n Arguments:\n width: The desired length of the string after padding. If the length of the\n string is greater than `width`, no padding is applied.\n If `width` is less than 0, no padding is applied.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"digits": ["+1", "-1", "1", None]})\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(zfill_col=nw.col("digits").str.zfill(3))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | digits zfill_col|\n |0 +1 +01|\n |1 -1 -01|\n |2 1 001|\n |3 None None|\n └──────────────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).str.zfill(width)\n )\n
.venv\Lib\site-packages\narwhals\expr_str.py
expr_str.py
Python
20,335
0.95
0.052734
0
react-lib
41
2025-01-25T17:05:32.196685
MIT
false
eecd2f09c9750a8bb2493c19962a783f
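A short sketch tying together a few of the string methods above, assuming narwhals and pandas are installed (the data is illustrative). As the source shows, `head` and `tail` are thin wrappers over `slice` with offsets `0` and `-n` respectively.

import pandas as pd
import narwhals as nw

df = nw.from_native(pd.DataFrame({"code": ["abc12345", "x9"]}))

out = df.with_columns(
    first3=nw.col("code").str.head(3),    # equivalent to .str.slice(0, 3)
    last2=nw.col("code").str.tail(2),     # equivalent to .str.slice(-2)
    padded=nw.col("code").str.zfill(10),  # zero-pad to width 10
)
# Strings shorter than n ("x9") are returned in full, per the notes above.
print(out.to_native())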
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Generic, TypeVar\n\nif TYPE_CHECKING:\n from narwhals.expr import Expr\n\nExprT = TypeVar("ExprT", bound="Expr")\n\n\nclass ExprStructNamespace(Generic[ExprT]):\n def __init__(self, expr: ExprT) -> None:\n self._expr = expr\n\n def field(self, name: str) -> ExprT:\n r"""Retrieve a Struct field as a new expression.\n\n Arguments:\n name: Name of the struct field to retrieve.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame(\n ... {"user": [{"id": "0", "name": "john"}, {"id": "1", "name": "jane"}]}\n ... )\n >>> df = nw.from_native(df_native)\n >>> df.with_columns(name=nw.col("user").struct.field("name"))\n ┌───────────────────────┐\n | Narwhals DataFrame |\n |-----------------------|\n |shape: (2, 2) |\n |┌──────────────┬──────┐|\n |│ user ┆ name │|\n |│ --- ┆ --- │|\n |│ struct[2] ┆ str │|\n |╞══════════════╪══════╡|\n |│ {"0","john"} ┆ john │|\n |│ {"1","jane"} ┆ jane │|\n |└──────────────┴──────┘|\n └───────────────────────┘\n """\n return self._expr._with_elementwise_op(\n lambda plx: self._expr._to_compliant_expr(plx).struct.field(name)\n )\n
.venv\Lib\site-packages\narwhals\expr_struct.py
expr_struct.py
Python
1,793
0.85
0.083333
0
python-kit
48
2024-04-16T12:12:24.517788
Apache-2.0
false
a1a7bea5d21b34c172d97d9af7df00d8
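A brief sketch of the single method in this namespace, assuming narwhals and polars are installed (polars is used here because it has native Struct support; the data mirrors the docstring).

import polars as pl
import narwhals as nw

df = nw.from_native(
    pl.DataFrame({"user": [{"id": "0", "name": "john"}, {"id": "1", "name": "jane"}]})
)

# Extract one field of the struct column into a regular column.
out = df.with_columns(name=nw.col("user").struct.field("name"))
print(out.to_native())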
from __future__ import annotations\n\nimport platform\nimport sys\nfrom collections.abc import Iterable, Mapping, Sequence\nfrom functools import partial\nfrom importlib.metadata import version\nfrom typing import TYPE_CHECKING, Any, Literal, cast\n\nfrom narwhals._expression_parsing import (\n ExprKind,\n ExprMetadata,\n apply_n_ary_operation,\n combine_metadata,\n extract_compliant,\n is_scalar_like,\n)\nfrom narwhals._typing_compat import deprecated\nfrom narwhals._utils import (\n Implementation,\n Version,\n deprecate_native_namespace,\n flatten,\n is_compliant_expr,\n is_eager_allowed,\n is_sequence_but_not_str,\n issue_deprecation_warning,\n parse_version,\n supports_arrow_c_stream,\n validate_laziness,\n)\nfrom narwhals.dependencies import (\n is_narwhals_series,\n is_numpy_array,\n is_numpy_array_2d,\n is_pyarrow_table,\n)\nfrom narwhals.exceptions import InvalidOperationError, ShapeError\nfrom narwhals.expr import Expr\nfrom narwhals.translate import from_native, to_native\n\nif TYPE_CHECKING:\n from types import ModuleType\n\n from typing_extensions import TypeAlias, TypeIs\n\n from narwhals._compliant import CompliantExpr, CompliantNamespace\n from narwhals._translate import IntoArrowTable\n from narwhals.dataframe import DataFrame, LazyFrame\n from narwhals.dtypes import DType\n from narwhals.schema import Schema\n from narwhals.series import Series\n from narwhals.typing import (\n ConcatMethod,\n FrameT,\n IntoDType,\n IntoExpr,\n IntoSeriesT,\n NativeFrame,\n NativeLazyFrame,\n NativeSeries,\n NonNestedLiteral,\n _1DArray,\n _2DArray,\n )\n\n _IntoSchema: TypeAlias = "Mapping[str, DType] | Schema | Sequence[str] | None"\n\n\ndef concat(items: Iterable[FrameT], *, how: ConcatMethod = "vertical") -> FrameT:\n """Concatenate multiple DataFrames, LazyFrames into a single entity.\n\n Arguments:\n items: DataFrames, LazyFrames to concatenate.\n how: concatenating strategy\n\n - vertical: Concatenate vertically. Column names must match.\n - horizontal: Concatenate horizontally. If lengths don't match, then\n missing rows are filled with null values. 
This is only supported\n            when all inputs are (eager) DataFrames.\n            - diagonal: Finds a union between the column schemas and fills missing column\n                values with null.\n\n    Returns:\n        A new DataFrame or LazyFrame resulting from the concatenation.\n\n    Raises:\n        TypeError: The items to concatenate should either all be eager, or all lazy\n\n    Examples:\n        Let's take one example of each concatenation strategy:\n\n        >>> import pandas as pd\n        >>> import polars as pl\n        >>> import pyarrow as pa\n        >>> import narwhals as nw\n\n        Let's look at one case of vertical concatenation (pandas backed):\n\n        >>> df_pd_1 = nw.from_native(pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}))\n        >>> df_pd_2 = nw.from_native(pd.DataFrame({"a": [5, 2], "b": [1, 4]}))\n        >>> nw.concat([df_pd_1, df_pd_2], how="vertical")\n        ┌──────────────────┐\n        |Narwhals DataFrame|\n        |------------------|\n        |       a  b       |\n        |    0  1  4       |\n        |    1  2  5       |\n        |    2  3  6       |\n        |    0  5  1       |\n        |    1  2  4       |\n        └──────────────────┘\n\n        Let's look at one case of horizontal concatenation (polars backed):\n\n        >>> df_pl_1 = nw.from_native(pl.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}))\n        >>> df_pl_2 = nw.from_native(pl.DataFrame({"c": [5, 2], "d": [1, 4]}))\n        >>> nw.concat([df_pl_1, df_pl_2], how="horizontal")\n        ┌───────────────────────────┐\n        |    Narwhals DataFrame     |\n        |---------------------------|\n        |shape: (3, 4)              |\n        |┌─────┬─────┬──────┬──────┐|\n        |│ a   ┆ b   ┆ c    ┆ d    │|\n        |│ --- ┆ --- ┆ ---  ┆ ---  │|\n        |│ i64 ┆ i64 ┆ i64  ┆ i64  │|\n        |╞═════╪═════╪══════╪══════╡|\n        |│ 1   ┆ 4   ┆ 5    ┆ 1    │|\n        |│ 2   ┆ 5   ┆ 2    ┆ 4    │|\n        |│ 3   ┆ 6   ┆ null ┆ null │|\n        |└─────┴─────┴──────┴──────┘|\n        └───────────────────────────┘\n\n        Let's look at one case of diagonal concatenation (pyarrow backed):\n\n        >>> df_pa_1 = nw.from_native(pa.table({"a": [1, 2], "b": [3.5, 4.5]}))\n        >>> df_pa_2 = nw.from_native(pa.table({"a": [3, 4], "z": ["x", "y"]}))\n        >>> nw.concat([df_pa_1, df_pa_2], how="diagonal")\n        ┌──────────────────────────┐\n        |    Narwhals DataFrame    |\n        |--------------------------|\n        |pyarrow.Table             |\n        |a: int64                  |\n        |b: double                 |\n        |z: string                 |\n        |----                      |\n        |a: [[1,2],[3,4]]          |\n        |b: [[3.5,4.5],[null,null]]|\n        |z: [[null,null],["x","y"]]|\n        └──────────────────────────┘\n    """\n    from narwhals.dependencies import is_narwhals_lazyframe\n\n    if not items:\n        msg = "No items to concatenate."\n        raise ValueError(msg)\n    items = list(items)\n    validate_laziness(items)\n    if how not in {"horizontal", "vertical", "diagonal"}:  # pragma: no cover\n        msg = "Only vertical, horizontal and diagonal concatenations are supported."\n        raise NotImplementedError(msg)\n    first_item = items[0]\n    if is_narwhals_lazyframe(first_item) and how == "horizontal":\n        msg = (\n            "Horizontal concatenation is not supported for LazyFrames.\n\n"\n            "Hint: you may want to use `join` instead."\n        )\n        raise InvalidOperationError(msg)\n    plx = first_item.__narwhals_namespace__()\n    return first_item._with_compliant(\n        plx.concat([df._compliant_frame for df in items], how=how)\n    )\n\n\n@deprecate_native_namespace(warn_version="1.31.0", required=True)\ndef new_series(\n    name: str,\n    values: Any,\n    dtype: IntoDType | None = None,\n    *,\n    backend: ModuleType | Implementation | str | None = None,\n    native_namespace: ModuleType | None = None,  # noqa: ARG001\n) -> Series[Any]:\n    """Instantiate Narwhals Series from iterable (e.g. list or array).\n\n    Arguments:\n        name: Name of resulting Series.\n        values: Values to make the Series from.\n        dtype: (Narwhals) dtype. 
If not provided, the native library\n            may auto-infer it from `values`.\n        backend: specifies which eager backend to instantiate.\n\n            `backend` can be specified in various ways\n\n            - As `Implementation.<BACKEND>` with `BACKEND` being `PANDAS`, `PYARROW`,\n                `POLARS`, `MODIN` or `CUDF`.\n            - As a string: `"pandas"`, `"pyarrow"`, `"polars"`, `"modin"` or `"cudf"`.\n            - Directly as a module `pandas`, `pyarrow`, `polars`, `modin` or `cudf`.\n        native_namespace: The native library to use for DataFrame creation.\n\n            *Deprecated* (v1.31.0)\n\n            Please use `backend` instead. Note that `native_namespace` is still available\n            (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n            see [perfect backwards compatibility policy](../backcompat.md/).\n\n    Returns:\n        A new Series.\n\n    Examples:\n        >>> import pandas as pd\n        >>> import narwhals as nw\n        >>>\n        >>> values = [4, 1, 2, 3]\n        >>> nw.new_series(name="a", values=values, dtype=nw.Int32, backend=pd)\n        ┌─────────────────────┐\n        |   Narwhals Series   |\n        |---------------------|\n        |0    4               |\n        |1    1               |\n        |2    2               |\n        |3    3               |\n        |Name: a, dtype: int32|\n        └─────────────────────┘\n    """\n    backend = cast("ModuleType | Implementation | str", backend)\n    return _new_series_impl(name, values, dtype, backend=backend)\n\n\ndef _new_series_impl(\n    name: str,\n    values: Any,\n    dtype: IntoDType | None = None,\n    *,\n    backend: ModuleType | Implementation | str,\n) -> Series[Any]:\n    implementation = Implementation.from_backend(backend)\n    if is_eager_allowed(implementation):\n        ns = Version.MAIN.namespace.from_backend(implementation).compliant\n        series = ns._series.from_iterable(values, name=name, context=ns, dtype=dtype)\n        return series.to_narwhals()\n    elif implementation is Implementation.UNKNOWN:  # pragma: no cover\n        _native_namespace = implementation.to_native_namespace()\n        try:\n            native_series: NativeSeries = _native_namespace.new_series(\n                name, values, dtype\n            )\n            return from_native(native_series, series_only=True).alias(name)\n        except AttributeError as e:\n            msg = "Unknown namespace is expected to implement `new_series` constructor."\n            raise AttributeError(msg) from e\n    msg = (\n        f"{implementation} support in Narwhals is lazy-only, but `new_series` is an eager-only function.\n\n"\n        "Hint: you may want to use an eager backend and then call `.lazy`, e.g.:\n\n"\n        f"    nw.new_series('a', [1,2,3], backend='pyarrow').to_frame().lazy('{implementation}')"\n    )\n    raise ValueError(msg)\n\n\n@deprecate_native_namespace(warn_version="1.26.0")\ndef from_dict(\n    data: Mapping[str, Any],\n    schema: Mapping[str, DType] | Schema | None = None,\n    *,\n    backend: ModuleType | Implementation | str | None = None,\n    native_namespace: ModuleType | None = None,  # noqa: ARG001\n) -> DataFrame[Any]:\n    """Instantiate DataFrame from dictionary.\n\n    Indexes (if present, for pandas-like backends) are aligned following\n    the [left-hand-rule](../concepts/pandas_index.md/).\n\n    Notes:\n        For pandas-like dataframes, conversion to schema is applied after dataframe\n        creation.\n\n    Arguments:\n        data: Dictionary to create DataFrame from.\n        schema: The DataFrame schema as Schema or dict of {name: type}. If not\n            specified, the schema will be inferred by the native library.\n        backend: specifies which eager backend to instantiate. 
Only\n            necessary if inputs are not Narwhals Series.\n\n            `backend` can be specified in various ways\n\n            - As `Implementation.<BACKEND>` with `BACKEND` being `PANDAS`, `PYARROW`,\n                `POLARS`, `MODIN` or `CUDF`.\n            - As a string: `"pandas"`, `"pyarrow"`, `"polars"`, `"modin"` or `"cudf"`.\n            - Directly as a module `pandas`, `pyarrow`, `polars`, `modin` or `cudf`.\n        native_namespace: The native library to use for DataFrame creation.\n\n            *Deprecated* (v1.26.0)\n\n            Please use `backend` instead. Note that `native_namespace` is still available\n            (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n            see [perfect backwards compatibility policy](../backcompat.md/).\n\n    Returns:\n        A new DataFrame.\n\n    Examples:\n        >>> import pandas as pd\n        >>> import narwhals as nw\n        >>> data = {"c": [5, 2], "d": [1, 4]}\n        >>> nw.from_dict(data, backend="pandas")\n        ┌──────────────────┐\n        |Narwhals DataFrame|\n        |------------------|\n        |       c  d       |\n        |    0  5  1       |\n        |    1  2  4       |\n        └──────────────────┘\n    """\n    if not data:\n        msg = "from_dict cannot be called with empty dictionary"\n        raise ValueError(msg)\n    if backend is None:\n        data, backend = _from_dict_no_backend(data)\n    implementation = Implementation.from_backend(backend)\n    if is_eager_allowed(implementation):\n        ns = Version.MAIN.namespace.from_backend(implementation).compliant\n        return ns._dataframe.from_dict(data, schema=schema, context=ns).to_narwhals()\n    elif implementation is Implementation.UNKNOWN:  # pragma: no cover\n        _native_namespace = implementation.to_native_namespace()\n        try:\n            # implementation is UNKNOWN, Narwhals extension using this feature should\n            # implement `from_dict` function in the top-level namespace.\n            native_frame: NativeFrame = _native_namespace.from_dict(data, schema=schema)\n        except AttributeError as e:\n            msg = "Unknown namespace is expected to implement `from_dict` function."\n            raise AttributeError(msg) from e\n        return from_native(native_frame, eager_only=True)\n    msg = (\n        f"{implementation} support in Narwhals is lazy-only, but `from_dict` is an eager-only function.\n\n"\n        "Hint: you may want to use an eager backend and then call `.lazy`, e.g.:\n\n"\n        f"    nw.from_dict({{'a': [1, 2]}}, backend='pyarrow').lazy('{implementation}')"\n    )\n    raise ValueError(msg)\n\n\ndef _from_dict_no_backend(\n    data: Mapping[str, Series[Any] | Any], /\n) -> tuple[dict[str, Series[Any] | Any], ModuleType]:\n    for val in data.values():\n        if is_narwhals_series(val):\n            native_namespace = val.__native_namespace__()\n            break\n    else:\n        msg = "Calling `from_dict` without `backend` is only supported if all input values are already Narwhals Series"\n        raise TypeError(msg)\n    data = {key: to_native(value, pass_through=True) for key, value in data.items()}\n    return data, native_namespace\n\n\n@deprecate_native_namespace(warn_version="1.31.0", required=True)\ndef from_numpy(\n    data: _2DArray,\n    schema: Mapping[str, DType] | Schema | Sequence[str] | None = None,\n    *,\n    backend: ModuleType | Implementation | str | None = None,\n    native_namespace: ModuleType | None = None,  # noqa: ARG001\n) -> DataFrame[Any]:\n    """Construct a DataFrame from a NumPy ndarray.\n\n    Notes:\n        Only row orientation is currently supported.\n\n        For pandas-like dataframes, conversion to schema is applied after dataframe\n        creation.\n\n    Arguments:\n        data: Two-dimensional data represented as a NumPy ndarray.\n        schema: The DataFrame schema as Schema, dict of {name: type}, or a sequence of str.\n        backend: specifies which eager backend to instantiate.\n\n            `backend` can be specified in various ways\n\n            - As 
`Implementation.<BACKEND>` with `BACKEND` being `PANDAS`, `PYARROW`,\n                `POLARS`, `MODIN` or `CUDF`.\n            - As a string: `"pandas"`, `"pyarrow"`, `"polars"`, `"modin"` or `"cudf"`.\n            - Directly as a module `pandas`, `pyarrow`, `polars`, `modin` or `cudf`.\n        native_namespace: The native library to use for DataFrame creation.\n\n            *Deprecated* (v1.31.0)\n\n            Please use `backend` instead. Note that `native_namespace` is still available\n            (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n            see [perfect backwards compatibility policy](../backcompat.md/).\n\n    Returns:\n        A new DataFrame.\n\n    Examples:\n        >>> import numpy as np\n        >>> import pyarrow as pa\n        >>> import narwhals as nw\n        >>>\n        >>> arr = np.array([[5, 2, 1], [1, 4, 3]])\n        >>> schema = {"c": nw.Int16(), "d": nw.Float32(), "e": nw.Int8()}\n        >>> nw.from_numpy(arr, schema=schema, backend="pyarrow")\n        ┌──────────────────┐\n        |Narwhals DataFrame|\n        |------------------|\n        |  pyarrow.Table   |\n        |  c: int16        |\n        |  d: float        |\n        |  e: int8         |\n        |  ----            |\n        |  c: [[5,1]]      |\n        |  d: [[2,4]]      |\n        |  e: [[1,3]]      |\n        └──────────────────┘\n    """\n    backend = cast("ModuleType | Implementation | str", backend)\n    if not is_numpy_array_2d(data):\n        msg = "`from_numpy` only accepts 2D numpy arrays"\n        raise ValueError(msg)\n    if not _is_into_schema(schema):\n        msg = (\n            "`schema` is expected to be one of the following types: "\n            "Mapping[str, DType] | Schema | Sequence[str]. "\n            f"Got {type(schema)}."\n        )\n        raise TypeError(msg)\n    implementation = Implementation.from_backend(backend)\n    if is_eager_allowed(implementation):\n        ns = Version.MAIN.namespace.from_backend(implementation).compliant\n        return ns.from_numpy(data, schema).to_narwhals()\n    elif implementation is Implementation.UNKNOWN:  # pragma: no cover\n        _native_namespace = implementation.to_native_namespace()\n        try:\n            # implementation is UNKNOWN, Narwhals extension using this feature should\n            # implement `from_numpy` function in the top-level namespace.\n            native_frame: NativeFrame = _native_namespace.from_numpy(data, schema=schema)\n        except AttributeError as e:\n            msg = "Unknown namespace is expected to implement `from_numpy` function."\n            raise AttributeError(msg) from e\n        return from_native(native_frame, eager_only=True)\n    msg = (\n        f"{implementation} support in Narwhals is lazy-only, but `from_numpy` is an eager-only function.\n\n"\n        "Hint: you may want to use an eager backend and then call `.lazy`, e.g.:\n\n"\n        f"    nw.from_numpy(arr, backend='pyarrow').lazy('{implementation}')"\n    )\n    raise ValueError(msg)\n\n\ndef _is_into_schema(obj: Any) -> TypeIs[_IntoSchema]:\n    from narwhals.schema import Schema\n\n    return (\n        obj is None or isinstance(obj, (Mapping, Schema)) or is_sequence_but_not_str(obj)\n    )\n\n\n@deprecate_native_namespace(warn_version="1.31.0", required=True)\ndef from_arrow(\n    native_frame: IntoArrowTable,\n    *,\n    backend: ModuleType | Implementation | str | None = None,\n    native_namespace: ModuleType | None = None,  # noqa: ARG001\n) -> DataFrame[Any]:  # pragma: no cover\n    """Construct a DataFrame from an object which supports the PyCapsule Interface.\n\n    Arguments:\n        native_frame: Object which implements `__arrow_c_stream__`.\n        backend: specifies which eager backend to instantiate.\n\n            `backend` can be specified in various ways\n\n            - As `Implementation.<BACKEND>` with `BACKEND` being `PANDAS`, `PYARROW`,\n                `POLARS`, `MODIN` or `CUDF`.\n            - As a string: `"pandas"`, `"pyarrow"`, `"polars"`, `"modin"` or `"cudf"`.\n            - Directly as a module `pandas`, `pyarrow`, `polars`, `modin` or `cudf`.\n        
native_namespace: The native library to use for DataFrame creation.\n\n *Deprecated* (v1.31.0)\n\n Please use `backend` instead. Note that `native_namespace` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n\n Returns:\n A new DataFrame.\n\n Examples:\n >>> import pandas as pd\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> df_native = pd.DataFrame({"a": [1, 2], "b": [4.2, 5.1]})\n >>> nw.from_arrow(df_native, backend="polars")\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | shape: (2, 2) |\n | ┌─────┬─────┐ |\n | │ a ┆ b │ |\n | │ --- ┆ --- │ |\n | │ i64 ┆ f64 │ |\n | ╞═════╪═════╡ |\n | │ 1 ┆ 4.2 │ |\n | │ 2 ┆ 5.1 │ |\n | └─────┴─────┘ |\n └──────────────────┘\n """\n backend = cast("ModuleType | Implementation | str", backend)\n if not (supports_arrow_c_stream(native_frame) or is_pyarrow_table(native_frame)):\n msg = f"Given object of type {type(native_frame)} does not support PyCapsule interface"\n raise TypeError(msg)\n implementation = Implementation.from_backend(backend)\n if is_eager_allowed(implementation):\n ns = Version.MAIN.namespace.from_backend(implementation).compliant\n return ns._dataframe.from_arrow(native_frame, context=ns).to_narwhals()\n elif implementation is Implementation.UNKNOWN: # pragma: no cover\n _native_namespace = implementation.to_native_namespace()\n try:\n # implementation is UNKNOWN, Narwhals extension using this feature should\n # implement PyCapsule support\n native: NativeFrame = _native_namespace.DataFrame(native_frame)\n except AttributeError as e:\n msg = "Unknown namespace is expected to implement `DataFrame` class which accepts object which supports PyCapsule Interface."\n raise AttributeError(msg) from e\n return from_native(native, eager_only=True)\n msg = (\n f"{implementation} support in Narwhals is lazy-only, but `from_arrow` is an eager-only function.\n\n"\n "Hint: you may want to use an eager backend and then call `.lazy`, e.g.:\n\n"\n f" nw.from_arrow(df, backend='pyarrow').lazy('{implementation}')"\n )\n raise ValueError(msg)\n\n\ndef _get_sys_info() -> dict[str, str]:\n """System information.\n\n Returns system and Python version information\n\n Copied from sklearn\n\n Returns:\n Dictionary with system info.\n """\n python = sys.version.replace("\n", " ")\n\n blob = (\n ("python", python),\n ("executable", sys.executable),\n ("machine", platform.platform()),\n )\n\n return dict(blob)\n\n\ndef _get_deps_info() -> dict[str, str]:\n """Overview of the installed version of main dependencies.\n\n This function does not import the modules to collect the version numbers\n but instead relies on standard Python package metadata.\n\n Returns version information on relevant Python libraries\n\n This function and show_versions were copied from sklearn and adapted\n\n Returns:\n Mapping from dependency to version.\n """\n from importlib.metadata import PackageNotFoundError, version\n\n from narwhals import __version__\n\n deps = ("pandas", "polars", "cudf", "modin", "pyarrow", "numpy")\n deps_info = {"narwhals": __version__}\n\n for modname in deps:\n try:\n deps_info[modname] = version(modname)\n except PackageNotFoundError: # noqa: PERF203\n deps_info[modname] = ""\n return deps_info\n\n\ndef show_versions() -> None:\n """Print useful debugging information.\n\n Examples:\n >>> from narwhals import show_versions\n >>> show_versions() # doctest: +SKIP\n """\n sys_info = _get_sys_info()\n deps_info 
= _get_deps_info()\n\n    print("\nSystem:")  # noqa: T201\n    for k, stat in sys_info.items():\n        print(f"{k:>10}: {stat}")  # noqa: T201\n\n    print("\nPython dependencies:")  # noqa: T201\n    for k, stat in deps_info.items():\n        print(f"{k:>13}: {stat}")  # noqa: T201\n\n\n@deprecated(\n    "`get_level` is deprecated, as Narwhals no longer supports the Dataframe Interchange Protocol."\n)\ndef get_level(\n    obj: DataFrame[Any] | LazyFrame[Any] | Series[IntoSeriesT],\n) -> Literal["full", "lazy", "interchange"]:\n    """Level of support Narwhals has for current object.\n\n    Warning:\n        `get_level` is deprecated and will be removed in a future version.\n        DuckDB and Ibis now have full lazy support in Narwhals, and passing\n        them to `nw.from_native` returns `nw.LazyFrame`.\n        Note: this will remain available in `narwhals.stable.v1`.\n        See [stable api](../backcompat.md/) for more information.\n\n    Arguments:\n        obj: Dataframe or Series.\n\n    Returns:\n        This can be one of\n\n        - 'full': full Narwhals API support\n        - 'lazy': only lazy operations are supported. This excludes anything\n            which involves iterating over rows in Python.\n    """\n    issue_deprecation_warning(\n        "`get_level` is deprecated, as Narwhals no longer supports the Dataframe Interchange Protocol.\n"\n        "DuckDB and Ibis now have full lazy support in Narwhals, and passing them to `nw.from_native` \n"\n        "returns `nw.LazyFrame`.",\n        "1.43",\n    )\n    return obj._level\n\n\n@deprecate_native_namespace(warn_version="1.27.2", required=True)\ndef read_csv(\n    source: str,\n    *,\n    backend: ModuleType | Implementation | str | None = None,\n    native_namespace: ModuleType | None = None,\n    **kwargs: Any,\n) -> DataFrame[Any]:\n    """Read a CSV file into a DataFrame.\n\n    Arguments:\n        source: Path to a file.\n        backend: The eager backend for DataFrame creation.\n            `backend` can be specified in various ways\n\n            - As `Implementation.<BACKEND>` with `BACKEND` being `PANDAS`, `PYARROW`,\n                `POLARS`, `MODIN` or `CUDF`.\n            - As a string: `"pandas"`, `"pyarrow"`, `"polars"`, `"modin"` or `"cudf"`.\n            - Directly as a module `pandas`, `pyarrow`, `polars`, `modin` or `cudf`.\n        native_namespace: The native library to use for DataFrame creation.\n\n            *Deprecated* (v1.27.2)\n\n            Please use `backend` instead. 
Note that `native_namespace` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n kwargs: Extra keyword arguments which are passed to the native CSV reader.\n For example, you could use\n `nw.read_csv('file.csv', backend='pandas', engine='pyarrow')`.\n\n Returns:\n DataFrame.\n\n Examples:\n >>> import narwhals as nw\n >>> nw.read_csv("file.csv", backend="pandas") # doctest:+SKIP\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 1 4 |\n | 1 2 5 |\n └──────────────────┘\n """\n backend = cast("ModuleType | Implementation | str", backend)\n eager_backend = Implementation.from_backend(backend)\n native_namespace = eager_backend.to_native_namespace()\n native_frame: NativeFrame\n if eager_backend in {\n Implementation.POLARS,\n Implementation.PANDAS,\n Implementation.MODIN,\n Implementation.CUDF,\n }:\n native_frame = native_namespace.read_csv(source, **kwargs)\n elif eager_backend is Implementation.PYARROW:\n from pyarrow import csv # ignore-banned-import\n\n native_frame = csv.read_csv(source, **kwargs)\n else: # pragma: no cover\n try:\n # implementation is UNKNOWN, Narwhals extension using this feature should\n # implement `read_csv` function in the top-level namespace.\n native_frame = native_namespace.read_csv(source=source, **kwargs)\n except AttributeError as e:\n msg = "Unknown namespace is expected to implement `read_csv` function."\n raise AttributeError(msg) from e\n return from_native(native_frame, eager_only=True)\n\n\n@deprecate_native_namespace(warn_version="1.31.0", required=True)\ndef scan_csv(\n source: str,\n *,\n backend: ModuleType | Implementation | str | None = None,\n native_namespace: ModuleType | None = None,\n **kwargs: Any,\n) -> LazyFrame[Any]:\n """Lazily read from a CSV file.\n\n For the libraries that do not support lazy dataframes, the function reads\n a csv file eagerly and then converts the resulting dataframe to a lazyframe.\n\n Arguments:\n source: Path to a file.\n backend: The eager backend for DataFrame creation.\n `backend` can be specified in various ways\n\n - As `Implementation.<BACKEND>` with `BACKEND` being `PANDAS`, `PYARROW`,\n `POLARS`, `MODIN` or `CUDF`.\n - As a string: `"pandas"`, `"pyarrow"`, `"polars"`, `"modin"` or `"cudf"`.\n - Directly as a module `pandas`, `pyarrow`, `polars`, `modin` or `cudf`.\n native_namespace: The native library to use for DataFrame creation.\n\n *Deprecated* (v1.31.0)\n\n Please use `backend` instead. 
Note that `native_namespace` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n kwargs: Extra keyword arguments which are passed to the native CSV reader.\n For example, you could use\n `nw.scan_csv('file.csv', backend=pd, engine='pyarrow')`.\n\n Returns:\n LazyFrame.\n\n Examples:\n >>> import duckdb\n >>> import narwhals as nw\n >>>\n >>> nw.scan_csv("file.csv", backend="duckdb").to_native() # doctest:+SKIP\n ┌─────────┬───────┐\n │ a │ b │\n │ varchar │ int32 │\n ├─────────┼───────┤\n │ x │ 1 │\n │ y │ 2 │\n │ z │ 3 │\n └─────────┴───────┘\n """\n backend = cast("ModuleType | Implementation | str", backend)\n implementation = Implementation.from_backend(backend)\n native_namespace = implementation.to_native_namespace()\n native_frame: NativeFrame | NativeLazyFrame\n if implementation is Implementation.POLARS:\n native_frame = native_namespace.scan_csv(source, **kwargs)\n elif implementation in {\n Implementation.PANDAS,\n Implementation.MODIN,\n Implementation.CUDF,\n Implementation.DASK,\n Implementation.DUCKDB,\n Implementation.IBIS,\n }:\n native_frame = native_namespace.read_csv(source, **kwargs)\n elif implementation is Implementation.PYARROW:\n from pyarrow import csv # ignore-banned-import\n\n native_frame = csv.read_csv(source, **kwargs)\n elif implementation.is_spark_like():\n if (session := kwargs.pop("session", None)) is None:\n msg = "Spark like backends require a session object to be passed in `kwargs`."\n raise ValueError(msg)\n\n csv_reader = session.read.format("csv")\n native_frame = (\n csv_reader.load(source)\n if (\n implementation is Implementation.SQLFRAME\n and parse_version(version("sqlframe")) < (3, 27, 0)\n )\n else csv_reader.options(**kwargs).load(source)\n )\n else: # pragma: no cover\n try:\n # implementation is UNKNOWN, Narwhals extension using this feature should\n # implement `scan_csv` function in the top-level namespace.\n native_frame = native_namespace.scan_csv(source=source, **kwargs)\n except AttributeError as e:\n msg = "Unknown namespace is expected to implement `scan_csv` function."\n raise AttributeError(msg) from e\n return from_native(native_frame).lazy()\n\n\n@deprecate_native_namespace(warn_version="1.31.0", required=True)\ndef read_parquet(\n source: str,\n *,\n backend: ModuleType | Implementation | str | None = None,\n native_namespace: ModuleType | None = None,\n **kwargs: Any,\n) -> DataFrame[Any]:\n """Read into a DataFrame from a parquet file.\n\n Arguments:\n source: Path to a file.\n backend: The eager backend for DataFrame creation.\n `backend` can be specified in various ways\n\n - As `Implementation.<BACKEND>` with `BACKEND` being `PANDAS`, `PYARROW`,\n `POLARS`, `MODIN` or `CUDF`.\n - As a string: `"pandas"`, `"pyarrow"`, `"polars"`, `"modin"` or `"cudf"`.\n - Directly as a module `pandas`, `pyarrow`, `polars`, `modin` or `cudf`.\n native_namespace: The native library to use for DataFrame creation.\n\n *Deprecated* (v1.31.0)\n\n Please use `backend` instead. 
Note that `native_namespace` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n kwargs: Extra keyword arguments which are passed to the native parquet reader.\n For example, you could use\n `nw.read_parquet('file.parquet', backend=pd, engine='pyarrow')`.\n\n Returns:\n DataFrame.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> nw.read_parquet("file.parquet", backend="pyarrow") # doctest:+SKIP\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n |pyarrow.Table |\n |a: int64 |\n |c: double |\n |---- |\n |a: [[1,2]] |\n |c: [[0.2,0.1]] |\n └──────────────────┘\n """\n backend = cast("ModuleType | Implementation | str", backend)\n implementation = Implementation.from_backend(backend)\n native_namespace = implementation.to_native_namespace()\n native_frame: NativeFrame\n if implementation in {\n Implementation.POLARS,\n Implementation.PANDAS,\n Implementation.MODIN,\n Implementation.CUDF,\n Implementation.DUCKDB,\n Implementation.IBIS,\n }:\n native_frame = native_namespace.read_parquet(source, **kwargs)\n elif implementation is Implementation.PYARROW:\n import pyarrow.parquet as pq # ignore-banned-import\n\n native_frame = pq.read_table(source, **kwargs)\n else: # pragma: no cover\n try:\n # implementation is UNKNOWN, Narwhals extension using this feature should\n # implement `read_parquet` function in the top-level namespace.\n native_frame = native_namespace.read_parquet(source=source, **kwargs)\n except AttributeError as e:\n msg = "Unknown namespace is expected to implement `read_parquet` function."\n raise AttributeError(msg) from e\n return from_native(native_frame, eager_only=True)\n\n\n@deprecate_native_namespace(warn_version="1.31.0", required=True)\ndef scan_parquet(\n source: str,\n *,\n backend: ModuleType | Implementation | str | None = None,\n native_namespace: ModuleType | None = None,\n **kwargs: Any,\n) -> LazyFrame[Any]:\n """Lazily read from a parquet file.\n\n For the libraries that do not support lazy dataframes, the function reads\n a parquet file eagerly and then converts the resulting dataframe to a lazyframe.\n\n Note:\n Spark like backends require a session object to be passed in `kwargs`.\n\n For instance:\n\n ```py\n import narwhals as nw\n from sqlframe.duckdb import DuckDBSession\n\n nw.scan_parquet(source, backend="sqlframe", session=DuckDBSession())\n ```\n\n Arguments:\n source: Path to a file.\n backend: The eager backend for DataFrame creation.\n `backend` can be specified in various ways\n\n - As `Implementation.<BACKEND>` with `BACKEND` being `PANDAS`, `PYARROW`,\n `POLARS`, `MODIN`, `CUDF`, `PYSPARK` or `SQLFRAME`.\n - As a string: `"pandas"`, `"pyarrow"`, `"polars"`, `"modin"`, `"cudf"`,\n `"pyspark"` or `"sqlframe"`.\n - Directly as a module `pandas`, `pyarrow`, `polars`, `modin`, `cudf`,\n `pyspark.sql` or `sqlframe`.\n native_namespace: The native library to use for DataFrame creation.\n\n *Deprecated* (v1.31.0)\n\n Please use `backend` instead. 
Note that `native_namespace` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n kwargs: Extra keyword arguments which are passed to the native parquet reader.\n For example, you could use\n `nw.scan_parquet('file.parquet', backend=pd, engine='pyarrow')`.\n\n Returns:\n LazyFrame.\n\n Examples:\n >>> import dask.dataframe as dd\n >>> from sqlframe.duckdb import DuckDBSession\n >>> import narwhals as nw\n >>>\n >>> nw.scan_parquet("file.parquet", backend="dask").collect() # doctest:+SKIP\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 1 4 |\n | 1 2 5 |\n └──────────────────┘\n >>> nw.scan_parquet(\n ... "file.parquet", backend="sqlframe", session=DuckDBSession()\n ... ).collect() # doctest:+SKIP\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | pyarrow.Table |\n | a: int64 |\n | b: int64 |\n | ---- |\n | a: [[1,2]] |\n | b: [[4,5]] |\n └──────────────────┘\n """\n backend = cast("ModuleType | Implementation | str", backend)\n implementation = Implementation.from_backend(backend)\n native_namespace = implementation.to_native_namespace()\n native_frame: NativeFrame | NativeLazyFrame\n if implementation is Implementation.POLARS:\n native_frame = native_namespace.scan_parquet(source, **kwargs)\n elif implementation in {\n Implementation.PANDAS,\n Implementation.MODIN,\n Implementation.CUDF,\n Implementation.DASK,\n Implementation.DUCKDB,\n Implementation.IBIS,\n }:\n native_frame = native_namespace.read_parquet(source, **kwargs)\n elif implementation is Implementation.PYARROW:\n import pyarrow.parquet as pq # ignore-banned-import\n\n native_frame = pq.read_table(source, **kwargs)\n elif implementation.is_spark_like():\n if (session := kwargs.pop("session", None)) is None:\n msg = "Spark like backends require a session object to be passed in `kwargs`."\n raise ValueError(msg)\n\n pq_reader = session.read.format("parquet")\n native_frame = (\n pq_reader.load(source)\n if (\n implementation is Implementation.SQLFRAME\n and parse_version(version("sqlframe")) < (3, 27, 0)\n )\n else pq_reader.options(**kwargs).load(source)\n )\n\n else: # pragma: no cover\n try:\n # implementation is UNKNOWN, Narwhals extension using this feature should\n # implement `scan_parquet` function in the top-level namespace.\n native_frame = native_namespace.scan_parquet(source=source, **kwargs)\n except AttributeError as e:\n msg = "Unknown namespace is expected to implement `scan_parquet` function."\n raise AttributeError(msg) from e\n return from_native(native_frame).lazy()\n\n\ndef col(*names: str | Iterable[str]) -> Expr:\n """Creates an expression that references one or more columns by their name(s).\n\n Arguments:\n names: Name(s) of the columns to use.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> df_native = pl.DataFrame({"a": [1, 2], "b": [3, 4], "c": ["x", "z"]})\n >>> nw.from_native(df_native).select(nw.col("a", "b") * nw.col("b"))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | shape: (2, 2) |\n | ┌─────┬─────┐ |\n | │ a ┆ b │ |\n | │ --- ┆ --- │ |\n | │ i64 ┆ i64 │ |\n | ╞═════╪═════╡ |\n | │ 3 ┆ 9 │ |\n | │ 8 ┆ 16 │ |\n | └─────┴─────┘ |\n └──────────────────┘\n """\n flat_names = flatten(names)\n\n def func(plx: Any) -> Any:\n return plx.col(*flat_names)\n\n return Expr(\n func,\n ExprMetadata.selector_single()\n if len(flat_names) == 1\n else 
ExprMetadata.selector_multi_named(),\n )\n\n\ndef exclude(*names: str | Iterable[str]) -> Expr:\n """Creates an expression that excludes columns by their name(s).\n\n Arguments:\n names: Name(s) of the columns to exclude.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> df_native = pl.DataFrame({"a": [1, 2], "b": [3, 4], "c": ["x", "z"]})\n >>> nw.from_native(df_native).select(nw.exclude("c", "a"))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | shape: (2, 1) |\n | ┌─────┐ |\n | │ b │ |\n | │ --- │ |\n | │ i64 │ |\n | ╞═════╡ |\n | │ 3 │ |\n | │ 4 │ |\n | └─────┘ |\n └──────────────────┘\n """\n exclude_names = frozenset(flatten(names))\n\n def func(plx: Any) -> Any:\n return plx.exclude(exclude_names)\n\n return Expr(func, ExprMetadata.selector_multi_unnamed())\n\n\ndef nth(*indices: int | Sequence[int]) -> Expr:\n """Creates an expression that references one or more columns by their index(es).\n\n Notes:\n `nth` is not supported for Polars version<1.0.0. Please use\n [`narwhals.col`][] instead.\n\n Arguments:\n indices: One or more indices representing the columns to retrieve.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> df_native = pa.table({"a": [1, 2], "b": [3, 4], "c": [0.123, 3.14]})\n >>> nw.from_native(df_native).select(nw.nth(0, 2) * 2)\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n |pyarrow.Table |\n |a: int64 |\n |c: double |\n |---- |\n |a: [[2,4]] |\n |c: [[0.246,6.28]] |\n └──────────────────┘\n """\n flat_indices = flatten(indices)\n\n def func(plx: Any) -> Any:\n return plx.nth(*flat_indices)\n\n return Expr(\n func,\n ExprMetadata.selector_single()\n if len(flat_indices) == 1\n else ExprMetadata.selector_multi_unnamed(),\n )\n\n\n# Add underscore so it doesn't conflict with builtin `all`\ndef all_() -> Expr:\n """Instantiate an expression representing all columns.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> df_native = pd.DataFrame({"a": [1, 2], "b": [3.14, 0.123]})\n >>> nw.from_native(df_native).select(nw.all() * 2)\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 2 6.280 |\n | 1 4 0.246 |\n └──────────────────┘\n """\n return Expr(lambda plx: plx.all(), ExprMetadata.selector_multi_unnamed())\n\n\n# Add underscore so it doesn't conflict with builtin `len`\ndef len_() -> Expr:\n """Return the number of rows.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> df_native = pl.DataFrame({"a": [1, 2], "b": [5, None]})\n >>> nw.from_native(df_native).select(nw.len())\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | shape: (1, 1) |\n | ┌─────┐ |\n | │ len │ |\n | │ --- │ |\n | │ u32 │ |\n | ╞═════╡ |\n | │ 2 │ |\n | └─────┘ |\n └──────────────────┘\n """\n\n def func(plx: Any) -> Any:\n return plx.len()\n\n return Expr(func, ExprMetadata.aggregation())\n\n\ndef sum(*columns: str) -> Expr:\n """Sum all values.\n\n Note:\n Syntactic sugar for ``nw.col(columns).sum()``\n\n Arguments:\n columns: Name(s) of the columns to use in the aggregation function\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> df_native = pd.DataFrame({"a": [1, 2], "b": [-1.4, 6.2]})\n >>> nw.from_native(df_native).select(nw.sum("a", "b"))\n ┌──────────────────┐\n |Narwhals 
DataFrame|\n |------------------|\n | a b |\n | 0 3 4.8 |\n └──────────────────┘\n """\n return col(*columns).sum()\n\n\ndef mean(*columns: str) -> Expr:\n """Get the mean value.\n\n Note:\n Syntactic sugar for ``nw.col(columns).mean()``\n\n Arguments:\n columns: Name(s) of the columns to use in the aggregation function\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> df_native = pa.table({"a": [1, 8, 3], "b": [3.14, 6.28, 42.1]})\n >>> nw.from_native(df_native).select(nw.mean("a", "b"))\n ┌─────────────────────────┐\n | Narwhals DataFrame |\n |-------------------------|\n |pyarrow.Table |\n |a: double |\n |b: double |\n |---- |\n |a: [[4]] |\n |b: [[17.173333333333336]]|\n └─────────────────────────┘\n """\n return col(*columns).mean()\n\n\ndef median(*columns: str) -> Expr:\n """Get the median value.\n\n Notes:\n - Syntactic sugar for ``nw.col(columns).median()``\n - Results might slightly differ across backends due to differences in the\n underlying algorithms used to compute the median.\n\n Arguments:\n columns: Name(s) of the columns to use in the aggregation function\n\n Returns:\n A new expression.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> df_native = pl.DataFrame({"a": [4, 5, 2]})\n >>> nw.from_native(df_native).select(nw.median("a"))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | shape: (1, 1) |\n | ┌─────┐ |\n | │ a │ |\n | │ --- │ |\n | │ f64 │ |\n | ╞═════╡ |\n | │ 4.0 │ |\n | └─────┘ |\n └──────────────────┘\n """\n return col(*columns).median()\n\n\ndef min(*columns: str) -> Expr:\n """Return the minimum value.\n\n Note:\n Syntactic sugar for ``nw.col(columns).min()``.\n\n Arguments:\n columns: Name(s) of the columns to use in the aggregation function.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> df_native = pa.table({"a": [1, 2], "b": [5, 10]})\n >>> nw.from_native(df_native).select(nw.min("a", "b"))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | pyarrow.Table |\n | a: int64 |\n | b: int64 |\n | ---- |\n | a: [[1]] |\n | b: [[5]] |\n └──────────────────┘\n """\n return col(*columns).min()\n\n\ndef max(*columns: str) -> Expr:\n """Return the maximum value.\n\n Note:\n Syntactic sugar for ``nw.col(columns).max()``.\n\n Arguments:\n columns: Name(s) of the columns to use in the aggregation function.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> df_native = pd.DataFrame({"a": [1, 2], "b": [5, 10]})\n >>> nw.from_native(df_native).select(nw.max("a", "b"))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 2 10 |\n └──────────────────┘\n """\n return col(*columns).max()\n\n\ndef sum_horizontal(*exprs: IntoExpr | Iterable[IntoExpr]) -> Expr:\n """Sum all values horizontally across columns.\n\n Warning:\n Unlike Polars, we support horizontal sum over numeric columns only.\n\n Arguments:\n exprs: Name(s) of the columns to use in the aggregation function. 
Accepts\n expression input.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> df_native = pl.DataFrame({"a": [1, 2, 3], "b": [5, 10, None]})\n >>> nw.from_native(df_native).with_columns(sum=nw.sum_horizontal("a", "b"))\n ┌────────────────────┐\n | Narwhals DataFrame |\n |--------------------|\n |shape: (3, 3) |\n |┌─────┬──────┬─────┐|\n |│ a ┆ b ┆ sum │|\n |│ --- ┆ --- ┆ --- │|\n |│ i64 ┆ i64 ┆ i64 │|\n |╞═════╪══════╪═════╡|\n |│ 1 ┆ 5 ┆ 6 │|\n |│ 2 ┆ 10 ┆ 12 │|\n |│ 3 ┆ null ┆ 3 │|\n |└─────┴──────┴─────┘|\n └────────────────────┘\n """\n if not exprs:\n msg = "At least one expression must be passed to `sum_horizontal`"\n raise ValueError(msg)\n flat_exprs = flatten(exprs)\n return Expr(\n lambda plx: apply_n_ary_operation(\n plx, plx.sum_horizontal, *flat_exprs, str_as_lit=False\n ),\n ExprMetadata.from_horizontal_op(*flat_exprs),\n )\n\n\ndef min_horizontal(*exprs: IntoExpr | Iterable[IntoExpr]) -> Expr:\n """Get the minimum value horizontally across columns.\n\n Notes:\n We support `min_horizontal` over numeric columns only.\n\n Arguments:\n exprs: Name(s) of the columns to use in the aggregation function. Accepts\n expression input.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> df_native = pa.table({"a": [1, 8, 3], "b": [4, 5, None]})\n >>> nw.from_native(df_native).with_columns(h_min=nw.min_horizontal("a", "b"))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | pyarrow.Table |\n | a: int64 |\n | b: int64 |\n | h_min: int64 |\n | ---- |\n | a: [[1,8,3]] |\n | b: [[4,5,null]] |\n | h_min: [[1,5,3]] |\n └──────────────────┘\n """\n if not exprs:\n msg = "At least one expression must be passed to `min_horizontal`"\n raise ValueError(msg)\n flat_exprs = flatten(exprs)\n return Expr(\n lambda plx: apply_n_ary_operation(\n plx, plx.min_horizontal, *flat_exprs, str_as_lit=False\n ),\n ExprMetadata.from_horizontal_op(*flat_exprs),\n )\n\n\ndef max_horizontal(*exprs: IntoExpr | Iterable[IntoExpr]) -> Expr:\n """Get the maximum value horizontally across columns.\n\n Notes:\n We support `max_horizontal` over numeric columns only.\n\n Arguments:\n exprs: Name(s) of the columns to use in the aggregation function. 
Accepts\n            expression input.\n\n    Returns:\n        A new expression.\n\n    Examples:\n        >>> import polars as pl\n        >>> import narwhals as nw\n        >>>\n        >>> df_native = pl.DataFrame({"a": [1, 8, 3], "b": [4, 5, None]})\n        >>> nw.from_native(df_native).with_columns(h_max=nw.max_horizontal("a", "b"))\n        ┌──────────────────────┐\n        |  Narwhals DataFrame  |\n        |----------------------|\n        |shape: (3, 3)         |\n        |┌─────┬──────┬───────┐|\n        |│ a   ┆ b    ┆ h_max │|\n        |│ --- ┆ ---  ┆ ---   │|\n        |│ i64 ┆ i64  ┆ i64   │|\n        |╞═════╪══════╪═══════╡|\n        |│ 1   ┆ 4    ┆ 4     │|\n        |│ 8   ┆ 5    ┆ 8     │|\n        |│ 3   ┆ null ┆ 3     │|\n        |└─────┴──────┴───────┘|\n        └──────────────────────┘\n    """\n    if not exprs:\n        msg = "At least one expression must be passed to `max_horizontal`"\n        raise ValueError(msg)\n    flat_exprs = flatten(exprs)\n    return Expr(\n        lambda plx: apply_n_ary_operation(\n            plx, plx.max_horizontal, *flat_exprs, str_as_lit=False\n        ),\n        ExprMetadata.from_horizontal_op(*flat_exprs),\n    )\n\n\nclass When:\n    def __init__(self, *predicates: IntoExpr | Iterable[IntoExpr]) -> None:\n        self._predicate = all_horizontal(*flatten(predicates), ignore_nulls=False)\n\n    def then(self, value: IntoExpr | NonNestedLiteral | _1DArray) -> Then:\n        kind = ExprKind.from_into_expr(value, str_as_lit=False)\n        if self._predicate._metadata.is_scalar_like and not kind.is_scalar_like:\n            msg = (\n                "If you pass a scalar-like predicate to `nw.when`, then "\n                "the `then` value must also be scalar-like."\n            )\n            raise ShapeError(msg)\n\n        return Then(\n            lambda plx: apply_n_ary_operation(\n                plx,\n                lambda *args: plx.when(args[0]).then(args[1]),\n                self._predicate,\n                value,\n                str_as_lit=False,\n            ),\n            combine_metadata(\n                self._predicate,\n                value,\n                str_as_lit=False,\n                allow_multi_output=False,\n                to_single_output=False,\n            ),\n        )\n\n\nclass Then(Expr):\n    def otherwise(self, value: IntoExpr | NonNestedLiteral | _1DArray) -> Expr:\n        kind = ExprKind.from_into_expr(value, str_as_lit=False)\n        if self._metadata.is_scalar_like and not is_scalar_like(kind):\n            msg = (\n                "If you pass a scalar-like predicate to `nw.when`, then "\n                "the `otherwise` value must also be scalar-like."\n            )\n            raise ShapeError(msg)\n\n        def func(plx: CompliantNamespace[Any, Any]) -> CompliantExpr[Any, Any]:\n            compliant_expr = self._to_compliant_expr(plx)\n            compliant_value = extract_compliant(plx, value, str_as_lit=False)\n            if (\n                not self._metadata.is_scalar_like\n                and is_scalar_like(kind)\n                and is_compliant_expr(compliant_value)\n            ):\n                compliant_value = compliant_value.broadcast(kind)\n            return compliant_expr.otherwise(compliant_value)  # type: ignore[attr-defined, no-any-return]\n\n        return Expr(\n            func,\n            combine_metadata(\n                self,\n                value,\n                str_as_lit=False,\n                allow_multi_output=False,\n                to_single_output=False,\n            ),\n        )\n\n\ndef when(*predicates: IntoExpr | Iterable[IntoExpr]) -> When:\n    """Start a `when-then-otherwise` expression.\n\n    Expression similar to an `if-else` statement in Python. Always initiated by a\n    `nw.when(<condition>).then(<value if condition>)`, and optionally followed by\n    `.otherwise(<value if condition is false>)` appended at the end. If not\n    appended, and the condition is not `True`, `None` will be returned.\n\n    Info:\n        Chaining multiple `.when(<condition>).then(<value>)` statements is currently\n        not supported.\n        See [Narwhals#668](https://github.com/narwhals-dev/narwhals/issues/668).\n\n    Arguments:\n        predicates: Condition(s) that must be met in order to apply the subsequent\n            statement. Accepts one or more boolean expressions, which are implicitly\n            combined with `&`. 
String input is parsed as a column name.\n\n Returns:\n A "when" object, which `.then` can be called on.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> data = {"a": [1, 2, 3], "b": [5, 10, 15]}\n >>> df_native = pd.DataFrame(data)\n >>> nw.from_native(df_native).with_columns(\n ... nw.when(nw.col("a") < 3).then(5).otherwise(6).alias("a_when")\n ... )\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b a_when |\n | 0 1 5 5 |\n | 1 2 10 5 |\n | 2 3 15 6 |\n └──────────────────┘\n """\n return When(*predicates)\n\n\ndef all_horizontal(\n *exprs: IntoExpr | Iterable[IntoExpr], ignore_nulls: bool | None = None\n) -> Expr:\n r"""Compute the bitwise AND horizontally across columns.\n\n Arguments:\n exprs: Name(s) of the columns to use in the aggregation function. Accepts\n expression input.\n ignore_nulls: Whether to ignore nulls:\n\n - If `True`, null values are ignored. If there are no elements, the result\n is `True`.\n - If `False` (default), Kleene logic is followed. Note that this is not allowed for\n pandas with classical NumPy dtypes when null values are present.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> data = {\n ... "a": [False, False, True, True, False, None],\n ... "b": [False, True, True, None, None, None],\n ... }\n >>> df_native = pa.table(data)\n >>> nw.from_native(df_native).select(\n ... "a", "b", all=nw.all_horizontal("a", "b", ignore_nulls=False)\n ... )\n ┌─────────────────────────────────────────┐\n | Narwhals DataFrame |\n |-----------------------------------------|\n |pyarrow.Table |\n |a: bool |\n |b: bool |\n |all: bool |\n |---- |\n |a: [[false,false,true,true,false,null]] |\n |b: [[false,true,true,null,null,null]] |\n |all: [[false,false,true,null,false,null]]|\n └─────────────────────────────────────────┘\n\n """\n if not exprs:\n msg = "At least one expression must be passed to `all_horizontal`"\n raise ValueError(msg)\n if ignore_nulls is None:\n issue_deprecation_warning(\n "`ignore_nulls` will become a required argument in Narwhals 2.0. Please specify `ignore_nulls=True` or `ignore_nulls=False` to silence this warning.",\n _version="1.45",\n )\n ignore_nulls = False\n flat_exprs = flatten(exprs)\n return Expr(\n lambda plx: apply_n_ary_operation(\n plx,\n partial(plx.all_horizontal, ignore_nulls=ignore_nulls),\n *flat_exprs,\n str_as_lit=False,\n ),\n ExprMetadata.from_horizontal_op(*flat_exprs),\n )\n\n\ndef lit(value: NonNestedLiteral, dtype: IntoDType | None = None) -> Expr:\n """Return an expression representing a literal value.\n\n Arguments:\n value: The value to use as literal.\n dtype: The data type of the literal value. If not provided, the data type will\n be inferred by the native library.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> df_native = pd.DataFrame({"a": [1, 2]})\n >>> nw.from_native(df_native).with_columns(nw.lit(3))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a literal |\n | 0 1 3 |\n | 1 2 3 |\n └──────────────────┘\n """\n if is_numpy_array(value):\n msg = (\n "numpy arrays are not supported as literal values. "\n "Consider using `with_columns` to create a new column from the array."\n )\n raise ValueError(msg)\n\n if isinstance(value, (list, tuple)):\n msg = f"Nested datatypes are not supported yet. 
Got {value}"\n raise NotImplementedError(msg)\n\n return Expr(lambda plx: plx.lit(value, dtype), ExprMetadata.literal())\n\n\ndef any_horizontal(\n *exprs: IntoExpr | Iterable[IntoExpr], ignore_nulls: bool | None = None\n) -> Expr:\n r"""Compute the bitwise OR horizontally across columns.\n\n Arguments:\n exprs: Name(s) of the columns to use in the aggregation function. Accepts\n expression input.\n ignore_nulls: Whether to ignore nulls:\n\n - If `True`, null values are ignored. If there are no elements, the result\n is `False`.\n - If `False` (default), Kleene logic is followed. Note that this is not allowed for\n pandas with classical NumPy dtypes when null values are present.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> data = {\n ... "a": [False, False, True, True, False, None],\n ... "b": [False, True, True, None, None, None],\n ... }\n >>> df_native = pl.DataFrame(data)\n >>> nw.from_native(df_native).select(\n ... "a", "b", any=nw.any_horizontal("a", "b", ignore_nulls=False)\n ... )\n ┌─────────────────────────┐\n | Narwhals DataFrame |\n |-------------------------|\n |shape: (6, 3) |\n |┌───────┬───────┬───────┐|\n |│ a ┆ b ┆ any │|\n |│ --- ┆ --- ┆ --- │|\n |│ bool ┆ bool ┆ bool │|\n |╞═══════╪═══════╪═══════╡|\n |│ false ┆ false ┆ false │|\n |│ false ┆ true ┆ true │|\n |│ true ┆ true ┆ true │|\n |│ true ┆ null ┆ true │|\n |│ false ┆ null ┆ null │|\n |│ null ┆ null ┆ null │|\n |└───────┴───────┴───────┘|\n └─────────────────────────┘\n """\n if not exprs:\n msg = "At least one expression must be passed to `any_horizontal`"\n raise ValueError(msg)\n if ignore_nulls is None:\n issue_deprecation_warning(\n "`ignore_nulls` will become a required argument in Narwhals 2.0. Please specify `ignore_nulls=True` or `ignore_nulls=False` to silence this warning.",\n _version="1.45",\n )\n ignore_nulls = False\n flat_exprs = flatten(exprs)\n return Expr(\n lambda plx: apply_n_ary_operation(\n plx,\n partial(plx.any_horizontal, ignore_nulls=ignore_nulls),\n *flat_exprs,\n str_as_lit=False,\n ),\n ExprMetadata.from_horizontal_op(*flat_exprs),\n )\n\n\ndef mean_horizontal(*exprs: IntoExpr | Iterable[IntoExpr]) -> Expr:\n """Compute the mean of all values horizontally across columns.\n\n Arguments:\n exprs: Name(s) of the columns to use in the aggregation function. Accepts\n expression input.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> data = {"a": [1, 8, 3], "b": [4, 5, None], "c": ["x", "y", "z"]}\n >>> df_native = pa.table(data)\n\n We define a dataframe-agnostic function that computes the horizontal mean of "a"\n and "b" columns:\n\n >>> nw.from_native(df_native).select(nw.mean_horizontal("a", "b"))\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | pyarrow.Table |\n | a: double |\n | ---- |\n | a: [[2.5,6.5,3]] |\n └──────────────────┘\n """\n if not exprs:\n msg = "At least one expression must be passed to `mean_horizontal`"\n raise ValueError(msg)\n flat_exprs = flatten(exprs)\n return Expr(\n lambda plx: apply_n_ary_operation(\n plx, plx.mean_horizontal, *flat_exprs, str_as_lit=False\n ),\n ExprMetadata.from_horizontal_op(*flat_exprs),\n )\n\n\ndef concat_str(\n exprs: IntoExpr | Iterable[IntoExpr],\n *more_exprs: IntoExpr,\n separator: str = "",\n ignore_nulls: bool = False,\n) -> Expr:\n r"""Horizontally concatenate columns into a single string column.\n\n Arguments:\n exprs: Columns to concatenate into a single string column. 
Accepts expression\n input. Strings are parsed as column names, other non-expression inputs are\n parsed as literals. Non-`String` columns are cast to `String`.\n *more_exprs: Additional columns to concatenate into a single string column,\n specified as positional arguments.\n separator: String that will be used to separate the values of each column.\n ignore_nulls: Ignore null values (default is `False`).\n If set to `False`, null values will be propagated and if the row contains any\n null values, the output is null.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> data = {\n ... "a": [1, 2, 3],\n ... "b": ["dogs", "cats", None],\n ... "c": ["play", "swim", "walk"],\n ... }\n >>> df_native = pd.DataFrame(data)\n >>> (\n ... nw.from_native(df_native).select(\n ... nw.concat_str(\n ... [nw.col("a") * 2, nw.col("b"), nw.col("c")], separator=" "\n ... ).alias("full_sentence")\n ... )\n ... )\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | full_sentence |\n | 0 2 dogs play |\n | 1 4 cats swim |\n | 2 None |\n └──────────────────┘\n """\n flat_exprs = flatten([*flatten([exprs]), *more_exprs])\n return Expr(\n lambda plx: apply_n_ary_operation(\n plx,\n lambda *args: plx.concat_str(\n *args, separator=separator, ignore_nulls=ignore_nulls\n ),\n *flat_exprs,\n str_as_lit=False,\n ),\n combine_metadata(\n *flat_exprs, str_as_lit=False, allow_multi_output=True, to_single_output=True\n ),\n )\n
.venv\Lib\site-packages\narwhals\functions.py
functions.py
Python
68,710
0.75
0.092973
0.029059
react-lib
788
2024-08-16T12:02:20.227558
GPL-3.0
false
83489e650a4c088d8a4d3e84fa620519
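The `when`/`then`/`otherwise` builder and the horizontal reductions documented above compose naturally. Below is a minimal sketch of how they fit together, assuming a pandas backend is installed; the column names and values are made up for illustration. Note that `then` and `otherwise` parse bare strings as column names (`str_as_lit=False` in the source above), so string literals must be wrapped in `nw.lit`.

```python
import pandas as pd
import narwhals as nw

# Illustrative data only; any supported backend works the same way.
df = nw.from_native(pd.DataFrame({"a": [1, 2, 3], "b": [10.0, None, 30.0]}))

out = df.with_columns(
    # Nulls are skipped, matching the `sum_horizontal` docstring example above.
    total=nw.sum_horizontal("a", "b"),
    # A bare "big" would be read as a column name, hence `nw.lit`.
    label=nw.when(nw.col("a") > 1).then(nw.lit("big")).otherwise(nw.lit("small")),
)
print(out.to_native())
```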
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Generic, TypeVar\n\nfrom narwhals._expression_parsing import all_exprs_are_scalar_like\nfrom narwhals._utils import flatten, tupleify\nfrom narwhals.exceptions import InvalidOperationError\nfrom narwhals.typing import DataFrameT\n\nif TYPE_CHECKING:\n from collections.abc import Iterable, Iterator, Sequence\n\n from narwhals._compliant.typing import CompliantExprAny\n from narwhals.dataframe import LazyFrame\n from narwhals.expr import Expr\n\nLazyFrameT = TypeVar("LazyFrameT", bound="LazyFrame[Any]")\n\n\nclass GroupBy(Generic[DataFrameT]):\n def __init__(\n self,\n df: DataFrameT,\n keys: Sequence[str] | Sequence[CompliantExprAny],\n /,\n *,\n drop_null_keys: bool,\n ) -> None:\n self._df: DataFrameT = df\n self._keys = keys\n self._grouped = self._df._compliant_frame.group_by(\n self._keys, drop_null_keys=drop_null_keys\n )\n\n def agg(self, *aggs: Expr | Iterable[Expr], **named_aggs: Expr) -> DataFrameT:\n """Compute aggregations for each group of a group by operation.\n\n Arguments:\n aggs: Aggregations to compute for each group of the group by operation,\n specified as positional arguments.\n named_aggs: Additional aggregations, specified as keyword arguments.\n\n Returns:\n A new Dataframe.\n\n Examples:\n Group by one column or by multiple columns and call `agg` to compute\n the grouped sum of another column.\n\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame(\n ... {\n ... "a": ["a", "b", "a", "b", "c"],\n ... "b": [1, 2, 1, 3, 3],\n ... "c": [5, 4, 3, 2, 1],\n ... }\n ... )\n >>> df = nw.from_native(df_native)\n >>>\n >>> df.group_by("a").agg(nw.col("b").sum()).sort("a")\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | a b |\n | 0 a 2 |\n | 1 b 5 |\n | 2 c 3 |\n └──────────────────┘\n >>>\n >>> df.group_by("a", "b").agg(nw.col("c").sum()).sort("a", "b").to_native()\n a b c\n 0 a 1 8\n 1 b 2 4\n 2 b 3 2\n 3 c 3 1\n """\n flat_aggs = tuple(flatten(aggs))\n if not all_exprs_are_scalar_like(*flat_aggs, **named_aggs):\n msg = (\n "Found expression which does not aggregate.\n\n"\n "All expressions passed to GroupBy.agg must aggregate.\n"\n "For example, `df.group_by('a').agg(nw.col('b').sum())` is valid,\n"\n "but `df.group_by('a').agg(nw.col('b'))` is not."\n )\n raise InvalidOperationError(msg)\n plx = self._df.__narwhals_namespace__()\n compliant_aggs = (\n *(x._to_compliant_expr(plx) for x in flat_aggs),\n *(\n value.alias(key)._to_compliant_expr(plx)\n for key, value in named_aggs.items()\n ),\n )\n return self._df._with_compliant(self._grouped.agg(*compliant_aggs))\n\n def __iter__(self) -> Iterator[tuple[Any, DataFrameT]]:\n yield from (\n (tupleify(key), self._df._with_compliant(df))\n for (key, df) in self._grouped.__iter__()\n )\n\n\nclass LazyGroupBy(Generic[LazyFrameT]):\n def __init__(\n self,\n df: LazyFrameT,\n keys: Sequence[str] | Sequence[CompliantExprAny],\n /,\n *,\n drop_null_keys: bool,\n ) -> None:\n self._df: LazyFrameT = df\n self._keys = keys\n self._grouped = self._df._compliant_frame.group_by(\n self._keys, drop_null_keys=drop_null_keys\n )\n\n def agg(self, *aggs: Expr | Iterable[Expr], **named_aggs: Expr) -> LazyFrameT:\n """Compute aggregations for each group of a group by operation.\n\n Arguments:\n aggs: Aggregations to compute for each group of the group by operation,\n specified as positional arguments.\n named_aggs: Additional aggregations, specified as keyword arguments.\n\n Returns:\n A new LazyFrame.\n\n 
Examples:\n Group by one column or by multiple columns and call `agg` to compute\n the grouped sum of another column.\n\n >>> import polars as pl\n >>> import narwhals as nw\n >>> lf_native = pl.LazyFrame(\n ... {\n ... "a": ["a", "b", "a", "b", "c"],\n ... "b": [1, 2, 1, 3, 3],\n ... "c": [5, 4, 3, 2, 1],\n ... }\n ... )\n >>> lf = nw.from_native(lf_native)\n >>>\n >>> nw.to_native(lf.group_by("a").agg(nw.col("b").sum()).sort("a")).collect()\n shape: (3, 2)\n ┌─────┬─────┐\n │ a ┆ b │\n │ --- ┆ --- │\n │ str ┆ i64 │\n ╞═════╪═════╡\n │ a ┆ 2 │\n │ b ┆ 5 │\n │ c ┆ 3 │\n └─────┴─────┘\n >>>\n >>> lf.group_by("a", "b").agg(nw.sum("c")).sort("a", "b").collect()\n ┌───────────────────┐\n |Narwhals DataFrame |\n |-------------------|\n |shape: (4, 3) |\n |┌─────┬─────┬─────┐|\n |│ a ┆ b ┆ c │|\n |│ --- ┆ --- ┆ --- │|\n |│ str ┆ i64 ┆ i64 │|\n |╞═════╪═════╪═════╡|\n |│ a ┆ 1 ┆ 8 │|\n |│ b ┆ 2 ┆ 4 │|\n |│ b ┆ 3 ┆ 2 │|\n |│ c ┆ 3 ┆ 1 │|\n |└─────┴─────┴─────┘|\n └───────────────────┘\n """\n flat_aggs = tuple(flatten(aggs))\n if not all_exprs_are_scalar_like(*flat_aggs, **named_aggs):\n msg = (\n "Found expression which does not aggregate.\n\n"\n "All expressions passed to GroupBy.agg must aggregate.\n"\n "For example, `df.group_by('a').agg(nw.col('b').sum())` is valid,\n"\n "but `df.group_by('a').agg(nw.col('b'))` is not."\n )\n raise InvalidOperationError(msg)\n plx = self._df.__narwhals_namespace__()\n compliant_aggs = (\n *(x._to_compliant_expr(plx) for x in flat_aggs),\n *(\n value.alias(key)._to_compliant_expr(plx)\n for key, value in named_aggs.items()\n ),\n )\n return self._df._with_compliant(self._grouped.agg(*compliant_aggs))\n
.venv\Lib\site-packages\narwhals\group_by.py
group_by.py
Python
7,258
0.85
0.098958
0.034884
awesome-app
576
2023-11-19T01:21:40.329821
GPL-3.0
false
d09580f4ba3f196eecfd268107624f6a
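As a usage note for the `agg` methods above: aggregations may be passed positionally or as keyword arguments (the keyword becomes the output column name via `.alias`, as the source shows), and any non-aggregating expression raises `InvalidOperationError`. A minimal sketch, assuming a pandas backend; the data is illustrative only.

```python
import pandas as pd
import narwhals as nw

df = nw.from_native(
    pd.DataFrame({"a": ["x", "x", "y"], "b": [1, 2, 3], "c": [10, 20, 30]})
)
out = df.group_by("a").agg(
    nw.col("b").sum(),          # positional aggregation, keeps the name "b"
    c_mean=nw.col("c").mean(),  # named aggregation, aliased to "c_mean"
)
print(out.sort("a").to_native())
```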
"""Schema.\n\nAdapted from Polars implementation at:\nhttps://github.com/pola-rs/polars/blob/main/py-polars/polars/schema.py.\n"""\n\nfrom __future__ import annotations\n\nfrom collections import OrderedDict\nfrom functools import partial\nfrom typing import TYPE_CHECKING, cast\n\nfrom narwhals._utils import Implementation, Version, parse_version\n\nif TYPE_CHECKING:\n from collections.abc import Iterable, Mapping\n from typing import Any, ClassVar\n\n import polars as pl\n import pyarrow as pa\n\n from narwhals.dtypes import DType\n from narwhals.typing import DTypeBackend\n\n BaseSchema = OrderedDict[str, DType]\nelse:\n # Python 3.8 does not support generic OrderedDict at runtime\n BaseSchema = OrderedDict\n\n__all__ = ["Schema"]\n\n\nclass Schema(BaseSchema):\n """Ordered mapping of column names to their data type.\n\n Arguments:\n schema: The schema definition given by column names and their associated\n *instantiated* Narwhals data type. Accepts a mapping or an iterable of tuples.\n\n Examples:\n Define a schema by passing *instantiated* data types.\n\n >>> import narwhals as nw\n >>> schema = nw.Schema({"foo": nw.Int8(), "bar": nw.String()})\n >>> schema\n Schema({'foo': Int8, 'bar': String})\n\n Access the data type associated with a specific column name.\n\n >>> schema["foo"]\n Int8\n\n Access various schema properties using the `names`, `dtypes`, and `len` methods.\n\n >>> schema.names()\n ['foo', 'bar']\n >>> schema.dtypes()\n [Int8, String]\n >>> schema.len()\n 2\n """\n\n _version: ClassVar[Version] = Version.MAIN\n\n def __init__(\n self, schema: Mapping[str, DType] | Iterable[tuple[str, DType]] | None = None\n ) -> None:\n schema = schema or {}\n super().__init__(schema)\n\n def names(self) -> list[str]:\n """Get the column names of the schema.\n\n Returns:\n Column names.\n """\n return list(self.keys())\n\n def dtypes(self) -> list[DType]:\n """Get the data types of the schema.\n\n Returns:\n Data types of schema.\n """\n return list(self.values())\n\n def len(self) -> int:\n """Get the number of columns in the schema.\n\n Returns:\n Number of columns.\n """\n return len(self)\n\n def to_arrow(self) -> pa.Schema:\n """Convert Schema to a pyarrow Schema.\n\n Returns:\n A pyarrow Schema.\n\n Examples:\n >>> import narwhals as nw\n >>> schema = nw.Schema({"a": nw.Int64(), "b": nw.Datetime("ns")})\n >>> schema.to_arrow()\n a: int64\n b: timestamp[ns]\n """\n import pyarrow as pa # ignore-banned-import\n\n from narwhals._arrow.utils import narwhals_to_native_dtype\n\n return pa.schema(\n (name, narwhals_to_native_dtype(dtype, self._version))\n for name, dtype in self.items()\n )\n\n def to_pandas(\n self, dtype_backend: DTypeBackend | Iterable[DTypeBackend] = None\n ) -> dict[str, Any]:\n """Convert Schema to an ordered mapping of column names to their pandas data type.\n\n Arguments:\n dtype_backend: Backend(s) used for the native types. 
When providing more than\n one, the length of the iterable must be equal to the length of the schema.\n\n Returns:\n An ordered mapping of column names to their pandas data type.\n\n Examples:\n >>> import narwhals as nw\n >>> schema = nw.Schema({"a": nw.Int64(), "b": nw.Datetime("ns")})\n >>> schema.to_pandas()\n {'a': 'int64', 'b': 'datetime64[ns]'}\n\n >>> schema.to_pandas("pyarrow")\n {'a': 'Int64[pyarrow]', 'b': 'timestamp[ns][pyarrow]'}\n """\n import pandas as pd # ignore-banned-import\n\n from narwhals._pandas_like.utils import narwhals_to_native_dtype\n\n to_native_dtype = partial(\n narwhals_to_native_dtype,\n implementation=Implementation.PANDAS,\n backend_version=parse_version(pd),\n version=self._version,\n )\n if dtype_backend is None or isinstance(dtype_backend, str):\n return {\n name: to_native_dtype(dtype=dtype, dtype_backend=dtype_backend)\n for name, dtype in self.items()\n }\n else:\n backends = tuple(dtype_backend)\n if len(backends) != len(self):\n from itertools import chain, islice, repeat\n\n n_user, n_actual = len(backends), len(self)\n suggestion = tuple(\n islice(\n chain.from_iterable(islice(repeat(backends), n_actual)), n_actual\n )\n )\n msg = (\n f"Provided {n_user!r} `dtype_backend`(s), but schema contains {n_actual!r} field(s).\n"\n "Hint: instead of\n"\n f" schema.to_pandas({backends})\n"\n "you may want to use\n"\n f" schema.to_pandas({backends[0]})\n"\n f"or\n"\n f" schema.to_pandas({suggestion})"\n )\n raise ValueError(msg)\n return {\n name: to_native_dtype(dtype=dtype, dtype_backend=backend)\n for name, dtype, backend in zip(self.keys(), self.values(), backends)\n }\n\n def to_polars(self) -> pl.Schema:\n """Convert Schema to a polars Schema.\n\n Returns:\n A polars Schema or plain dict (prior to polars 1.0).\n\n Examples:\n >>> import narwhals as nw\n >>> schema = nw.Schema({"a": nw.Int64(), "b": nw.Datetime("ns")})\n >>> schema.to_polars()\n Schema({'a': Int64, 'b': Datetime(time_unit='ns', time_zone=None)})\n """\n import polars as pl # ignore-banned-import\n\n from narwhals._polars.utils import narwhals_to_native_dtype\n\n pl_version = parse_version(pl)\n schema = (\n (\n name,\n narwhals_to_native_dtype(\n dtype, self._version, backend_version=pl_version\n ),\n )\n for name, dtype in self.items()\n )\n return (\n pl.Schema(schema)\n if pl_version >= (1, 0, 0)\n else cast("pl.Schema", dict(schema))\n )\n
.venv\Lib\site-packages\narwhals\schema.py
schema.py
Python
6,488
0.95
0.08134
0.012121
awesome-app
827
2024-04-15T02:55:47.498201
BSD-3-Clause
false
bf0bd97dcd1a82d18587f565937a0bea
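The conversion methods above make `Schema` a small bridge between backends. Here is a sketch of the round-trips, assuming pyarrow, pandas, and polars are all installed; the field names are made up.

```python
import narwhals as nw

schema = nw.Schema({"id": nw.Int64(), "ts": nw.Datetime("us")})

print(schema.names())  # ['id', 'ts']
print(schema.len())    # 2

# Each conversion requires the corresponding backend to be installed.
arrow_schema = schema.to_arrow()             # pyarrow.Schema
pandas_dtypes = schema.to_pandas("pyarrow")  # e.g. {'id': 'Int64[pyarrow]', ...}
polars_schema = schema.to_polars()           # polars.Schema (plain dict before polars 1.0)
```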
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, NoReturn\n\nfrom narwhals._expression_parsing import ExprMetadata, combine_metadata\nfrom narwhals._utils import flatten\nfrom narwhals.expr import Expr\n\nif TYPE_CHECKING:\n from collections.abc import Iterable\n from datetime import timezone\n\n from narwhals.dtypes import DType\n from narwhals.typing import TimeUnit\n\n\nclass Selector(Expr):\n def _to_expr(self) -> Expr:\n return Expr(self._to_compliant_expr, self._metadata)\n\n def __add__(self, other: Any) -> Expr: # type: ignore[override]\n if isinstance(other, Selector):\n msg = "unsupported operand type(s) for op: ('Selector' + 'Selector')"\n raise TypeError(msg)\n return self._to_expr() + other # type: ignore[no-any-return]\n\n def __or__(self, other: Any) -> Expr: # type: ignore[override]\n if isinstance(other, Selector):\n return self.__class__(\n lambda plx: self._to_compliant_expr(plx) | other._to_compliant_expr(plx),\n combine_metadata(\n self,\n other,\n str_as_lit=False,\n allow_multi_output=True,\n to_single_output=False,\n ),\n )\n return self._to_expr() | other # type: ignore[no-any-return]\n\n def __and__(self, other: Any) -> Expr: # type: ignore[override]\n if isinstance(other, Selector):\n return self.__class__(\n lambda plx: self._to_compliant_expr(plx) & other._to_compliant_expr(plx),\n combine_metadata(\n self,\n other,\n str_as_lit=False,\n allow_multi_output=True,\n to_single_output=False,\n ),\n )\n return self._to_expr() & other # type: ignore[no-any-return]\n\n def __rsub__(self, other: Any) -> NoReturn:\n raise NotImplementedError\n\n def __rand__(self, other: Any) -> NoReturn:\n raise NotImplementedError\n\n def __ror__(self, other: Any) -> NoReturn:\n raise NotImplementedError\n\n\ndef by_dtype(*dtypes: DType | type[DType] | Iterable[DType | type[DType]]) -> Selector:\n """Select columns based on their dtype.\n\n Arguments:\n dtypes: one or more data types to select\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> import narwhals.selectors as ncs\n >>> df_native = pa.table({"a": [1, 2], "b": ["x", "y"], "c": [4.1, 2.3]})\n >>> df = nw.from_native(df_native)\n\n Let's select int64 and float64 dtypes and multiply each value by 2:\n\n >>> df.select(ncs.by_dtype(nw.Int64, nw.Float64) * 2).to_native()\n pyarrow.Table\n a: int64\n c: double\n ----\n a: [[2,4]]\n c: [[8.2,4.6]]\n """\n flattened = flatten(dtypes)\n return Selector(\n lambda plx: plx.selectors.by_dtype(flattened),\n ExprMetadata.selector_multi_unnamed(),\n )\n\n\ndef matches(pattern: str) -> Selector:\n """Select all columns that match the given regex pattern.\n\n Arguments:\n pattern: A valid regular expression pattern.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> import narwhals.selectors as ncs\n >>> df_native = pd.DataFrame(\n ... {"bar": [123, 456], "baz": [2.0, 5.5], "zap": [0, 1]}\n ... 
)\n >>> df = nw.from_native(df_native)\n\n Let's select column names containing an 'a', preceded by a character that is not 'z':\n\n >>> df.select(ncs.matches("[^z]a")).to_native()\n bar baz\n 0 123 2.0\n 1 456 5.5\n """\n return Selector(\n lambda plx: plx.selectors.matches(pattern), ExprMetadata.selector_multi_unnamed()\n )\n\n\ndef numeric() -> Selector:\n """Select numeric columns.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> import narwhals.selectors as ncs\n >>> df_native = pl.DataFrame({"a": [1, 2], "b": ["x", "y"], "c": [4.1, 2.3]})\n >>> df = nw.from_native(df_native)\n\n Let's select numeric dtypes and multiply each value by 2:\n\n >>> df.select(ncs.numeric() * 2).to_native()\n shape: (2, 2)\n ┌─────┬─────┐\n │ a ┆ c │\n │ --- ┆ --- │\n │ i64 ┆ f64 │\n ╞═════╪═════╡\n │ 2 ┆ 8.2 │\n │ 4 ┆ 4.6 │\n └─────┴─────┘\n """\n return Selector(\n lambda plx: plx.selectors.numeric(), ExprMetadata.selector_multi_unnamed()\n )\n\n\ndef boolean() -> Selector:\n """Select boolean columns.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> import narwhals.selectors as ncs\n >>> df_native = pl.DataFrame({"a": [1, 2], "b": ["x", "y"], "c": [False, True]})\n >>> df = nw.from_native(df_native)\n\n Let's select boolean dtypes:\n\n >>> df.select(ncs.boolean())\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n | shape: (2, 1) |\n | ┌───────┐ |\n | │ c │ |\n | │ --- │ |\n | │ bool │ |\n | ╞═══════╡ |\n | │ false │ |\n | │ true │ |\n | └───────┘ |\n └──────────────────┘\n """\n return Selector(\n lambda plx: plx.selectors.boolean(), ExprMetadata.selector_multi_unnamed()\n )\n\n\ndef string() -> Selector:\n """Select string columns.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> import narwhals.selectors as ncs\n >>> df_native = pl.DataFrame({"a": [1, 2], "b": ["x", "y"], "c": [False, True]})\n >>> df = nw.from_native(df_native)\n\n Let's select string dtypes:\n\n >>> df.select(ncs.string()).to_native()\n shape: (2, 1)\n ┌─────┐\n │ b │\n │ --- │\n │ str │\n ╞═════╡\n │ x │\n │ y │\n └─────┘\n """\n return Selector(\n lambda plx: plx.selectors.string(), ExprMetadata.selector_multi_unnamed()\n )\n\n\ndef categorical() -> Selector:\n """Select categorical columns.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> import narwhals.selectors as ncs\n >>> df_native = pl.DataFrame({"a": [1, 2], "b": ["x", "y"], "c": [False, True]})\n\n Let's convert column "b" to categorical, and then select categorical dtypes:\n\n >>> df = nw.from_native(df_native).with_columns(\n ... b=nw.col("b").cast(nw.Categorical())\n ... 
)\n >>> df.select(ncs.categorical()).to_native()\n shape: (2, 1)\n ┌─────┐\n │ b │\n │ --- │\n │ cat │\n ╞═════╡\n │ x │\n │ y │\n └─────┘\n """\n return Selector(\n lambda plx: plx.selectors.categorical(), ExprMetadata.selector_multi_unnamed()\n )\n\n\ndef all() -> Selector:\n """Select all columns.\n\n Returns:\n A new expression.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> import narwhals.selectors as ncs\n >>> df_native = pd.DataFrame({"a": [1, 2], "b": ["x", "y"], "c": [False, True]})\n >>> df = nw.from_native(df_native)\n\n Let's select all dtypes:\n\n >>> df.select(ncs.all()).to_native()\n a b c\n 0 1 x False\n 1 2 y True\n """\n return Selector(\n lambda plx: plx.selectors.all(), ExprMetadata.selector_multi_unnamed()\n )\n\n\ndef datetime(\n time_unit: TimeUnit | Iterable[TimeUnit] | None = None,\n time_zone: str | timezone | Iterable[str | timezone | None] | None = ("*", None),\n) -> Selector:\n """Select all datetime columns, optionally filtering by time unit/zone.\n\n Arguments:\n time_unit: One (or more) of the allowed timeunit precision strings, "ms", "us",\n "ns" and "s". Omit to select columns with any valid timeunit.\n time_zone: Specify which timezone(s) to select\n\n * One or more timezone strings, as defined in zoneinfo (to see valid options\n run `import zoneinfo; zoneinfo.available_timezones()` for a full list).\n * Set `None` to select Datetime columns that do not have a timezone.\n * Set `"*"` to select Datetime columns that have *any* timezone.\n\n Returns:\n A new expression.\n\n Examples:\n >>> from datetime import datetime, timezone\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> import narwhals.selectors as ncs\n >>>\n >>> utc_tz = timezone.utc\n >>> data = {\n ... "tstamp_utc": [\n ... datetime(2023, 4, 10, 12, 14, 16, 999000, tzinfo=utc_tz),\n ... datetime(2025, 8, 25, 14, 18, 22, 666000, tzinfo=utc_tz),\n ... ],\n ... "tstamp": [\n ... datetime(2000, 11, 20, 18, 12, 16, 600000),\n ... datetime(2020, 10, 30, 10, 20, 25, 123000),\n ... ],\n ... "numeric": [3.14, 6.28],\n ... }\n >>> df_native = pa.table(data)\n >>> df_nw = nw.from_native(df_native)\n >>> df_nw.select(ncs.datetime()).to_native()\n pyarrow.Table\n tstamp_utc: timestamp[us, tz=UTC]\n tstamp: timestamp[us]\n ----\n tstamp_utc: [[2023-04-10 12:14:16.999000Z,2025-08-25 14:18:22.666000Z]]\n tstamp: [[2000-11-20 18:12:16.600000,2020-10-30 10:20:25.123000]]\n\n Select only datetime columns that have any time_zone specification:\n\n >>> df_nw.select(ncs.datetime(time_zone="*")).to_native()\n pyarrow.Table\n tstamp_utc: timestamp[us, tz=UTC]\n ----\n tstamp_utc: [[2023-04-10 12:14:16.999000Z,2025-08-25 14:18:22.666000Z]]\n """\n return Selector(\n lambda plx: plx.selectors.datetime(time_unit=time_unit, time_zone=time_zone),\n ExprMetadata.selector_multi_unnamed(),\n )\n\n\n__all__ = [\n "all",\n "boolean",\n "by_dtype",\n "categorical",\n "datetime",\n "matches",\n "numeric",\n "string",\n]\n
.venv\Lib\site-packages\narwhals\selectors.py
selectors.py
Python
10,759
0.95
0.062323
0.010453
python-kit
53
2023-11-05T01:34:17.811258
GPL-3.0
false
b074041d0b8c356a4f9453933c31b250
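Selectors can be combined with `|` and `&` (as implemented in `Selector.__or__` and `Selector.__and__` above), while `selector + selector` raises `TypeError`. A minimal sketch, assuming a polars backend; the frame and the expected selections in the comments are illustrative.

```python
import polars as pl
import narwhals as nw
import narwhals.selectors as ncs

df = nw.from_native(
    pl.DataFrame({"a": [1, 2], "b": ["x", "y"], "c": [1.5, 2.5], "d": [True, False]})
)

# Union: numeric or boolean columns -> "a", "c", "d".
print(df.select(ncs.numeric() | ncs.boolean()).to_native())

# Intersection: numeric and Float64 columns -> "c".
print(df.select(ncs.numeric() & ncs.by_dtype(nw.Float64)).to_native())
```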
from __future__ import annotations\n\nimport math\nfrom collections.abc import Iterator, Mapping, Sequence\nfrom typing import TYPE_CHECKING, Any, Callable, Generic, Literal, overload\n\nfrom narwhals._utils import (\n _validate_rolling_arguments,\n ensure_type,\n generate_repr,\n is_compliant_series,\n is_index_selector,\n parse_version,\n supports_arrow_c_stream,\n)\nfrom narwhals.dependencies import is_numpy_scalar\nfrom narwhals.dtypes import _validate_dtype\nfrom narwhals.exceptions import ComputeError\nfrom narwhals.series_cat import SeriesCatNamespace\nfrom narwhals.series_dt import SeriesDateTimeNamespace\nfrom narwhals.series_list import SeriesListNamespace\nfrom narwhals.series_str import SeriesStringNamespace\nfrom narwhals.series_struct import SeriesStructNamespace\nfrom narwhals.translate import to_native\nfrom narwhals.typing import IntoSeriesT\n\nif TYPE_CHECKING:\n from types import ModuleType\n\n import pandas as pd\n import polars as pl\n import pyarrow as pa\n from typing_extensions import Self\n\n from narwhals._compliant import CompliantSeries\n from narwhals._utils import Implementation\n from narwhals.dataframe import DataFrame, MultiIndexSelector\n from narwhals.dtypes import DType\n from narwhals.typing import (\n ClosedInterval,\n FillNullStrategy,\n IntoDType,\n NonNestedLiteral,\n NumericLiteral,\n RankMethod,\n RollingInterpolationMethod,\n SingleIndexSelector,\n TemporalLiteral,\n _1DArray,\n )\n\n\nclass Series(Generic[IntoSeriesT]):\n """Narwhals Series, backed by a native series.\n\n Warning:\n This class is not meant to be instantiated directly - instead:\n\n - If the native object is a series from one of the supported backend (e.g.\n pandas.Series, polars.Series, pyarrow.ChunkedArray), you can use\n [`narwhals.from_native`][]:\n ```py\n narwhals.from_native(native_series, allow_series=True)\n narwhals.from_native(native_series, series_only=True)\n ```\n\n - If the object is a generic sequence (e.g. 
a list or a tuple of values), you can\n create a series via [`narwhals.new_series`][], e.g.:\n ```py\n narwhals.new_series(name="price", values=[10.5, 9.4, 1.2], backend="pandas")\n ```\n """\n\n @property\n def _dataframe(self) -> type[DataFrame[Any]]:\n from narwhals.dataframe import DataFrame\n\n return DataFrame\n\n def __init__(\n self, series: Any, *, level: Literal["full", "lazy", "interchange"]\n ) -> None:\n self._level: Literal["full", "lazy", "interchange"] = level\n if is_compliant_series(series):\n self._compliant_series: CompliantSeries[IntoSeriesT] = (\n series.__narwhals_series__()\n )\n else: # pragma: no cover\n msg = f"Expected Polars Series or an object which implements `__narwhals_series__`, got: {type(series)}."\n raise AssertionError(msg)\n\n @property\n def implementation(self) -> Implementation:\n """Return implementation of native Series.\n\n This can be useful when you need to use special-casing for features outside of\n Narwhals' scope - for example, when dealing with pandas' Period Dtype.\n\n Returns:\n Implementation.\n\n Examples:\n >>> import narwhals as nw\n >>> import pandas as pd\n\n >>> s_native = pd.Series([1, 2, 3])\n >>> s = nw.from_native(s_native, series_only=True)\n\n >>> s.implementation\n <Implementation.PANDAS: 'pandas'>\n\n >>> s.implementation.is_pandas()\n True\n\n >>> s.implementation.is_pandas_like()\n True\n\n >>> s.implementation.is_polars()\n False\n """\n return self._compliant_series._implementation\n\n def __array__(self, dtype: Any = None, copy: bool | None = None) -> _1DArray: # noqa: FBT001\n return self._compliant_series.__array__(dtype=dtype, copy=copy)\n\n @overload\n def __getitem__(self, idx: SingleIndexSelector) -> Any: ...\n\n @overload\n def __getitem__(self, idx: MultiIndexSelector) -> Self: ...\n\n def __getitem__(self, idx: SingleIndexSelector | MultiIndexSelector) -> Any | Self:\n """Retrieve elements from the object using integer indexing or slicing.\n\n Arguments:\n idx: The index, slice, or sequence of indices to retrieve.\n\n - If `idx` is an integer, a single element is returned.\n - If `idx` is a slice, a sequence of integers, or another Series\n (with integer values) a subset of the Series is returned.\n\n Returns:\n A single element if `idx` is an integer, else a subset of the Series.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[1, 2, 3]])\n >>> nw.from_native(s_native, series_only=True)[0]\n 1\n\n >>> nw.from_native(s_native, series_only=True)[\n ... :2\n ... 
].to_native() # doctest:+ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n 1,\n 2\n ]\n ]\n """\n if isinstance(idx, int) or (\n is_numpy_scalar(idx) and idx.dtype.kind in {"i", "u"}\n ):\n idx = int(idx) if not isinstance(idx, int) else idx\n return self._compliant_series.item(idx)\n\n if isinstance(idx, self.to_native().__class__):\n idx = self._with_compliant(self._compliant_series._with_native(idx))\n\n if not is_index_selector(idx):\n msg = (\n f"Unexpected type for `Series.__getitem__`: {type(idx)}.\n\n"\n "Hints:\n"\n "- use `s.item` to select a single item.\n"\n "- Use `s[indices]` to select rows positionally.\n"\n "- Use `s.filter(mask)` to filter rows based on a boolean mask."\n )\n raise TypeError(msg)\n if isinstance(idx, Series):\n return self._with_compliant(self._compliant_series[idx._compliant_series])\n assert not isinstance(idx, int) # noqa: S101 # help mypy\n return self._with_compliant(self._compliant_series[idx])\n\n def __native_namespace__(self) -> ModuleType:\n return self._compliant_series.__native_namespace__()\n\n def __arrow_c_stream__(self, requested_schema: object | None = None) -> object:\n """Export a Series via the Arrow PyCapsule Interface.\n\n Narwhals doesn't implement anything itself here:\n\n - if the underlying series implements the interface, it'll return that\n - else, it'll call `to_arrow` and then defer to PyArrow's implementation\n\n See [PyCapsule Interface](https://arrow.apache.org/docs/dev/format/CDataInterface/PyCapsuleInterface.html)\n for more.\n """\n native_series = self._compliant_series.native\n if supports_arrow_c_stream(native_series):\n return native_series.__arrow_c_stream__(requested_schema=requested_schema)\n try:\n import pyarrow as pa # ignore-banned-import\n except ModuleNotFoundError as exc: # pragma: no cover\n msg = f"'pyarrow>=16.0.0' is required for `Series.__arrow_c_stream__` for object of type {type(native_series)}"\n raise ModuleNotFoundError(msg) from exc\n if parse_version(pa) < (16, 0): # pragma: no cover\n msg = f"'pyarrow>=16.0.0' is required for `Series.__arrow_c_stream__` for object of type {type(native_series)}"\n raise ModuleNotFoundError(msg)\n from narwhals._arrow.utils import chunked_array\n\n ca = chunked_array(self.to_arrow())\n return ca.__arrow_c_stream__(requested_schema=requested_schema)\n\n def to_native(self) -> IntoSeriesT:\n """Convert Narwhals series to native series.\n\n Returns:\n Series of class that user started with.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([1, 2])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (2,)\n Series: '' [i64]\n [\n 1\n 2\n ]\n """\n return self._compliant_series.native\n\n def scatter(self, indices: int | Sequence[int], values: Any) -> Self:\n """Set value(s) at given position(s).\n\n Arguments:\n indices: Position(s) to set items at.\n values: Values to set.\n\n Returns:\n A new Series with values set at given positions.\n\n Note:\n This method always returns a new Series, without modifying the original one.\n Using this function in a for-loop is an anti-pattern, we recommend building\n up your positions and values beforehand and doing an update in one go.\n\n For example, instead of\n\n ```python\n for i in [1, 3, 2]:\n value = some_function(i)\n s = s.scatter(i, value)\n ```\n\n prefer\n\n ```python\n positions = [1, 3, 2]\n values = [some_function(x) for x in positions]\n s = s.scatter(positions, values)\n ```\n\n Examples:\n 
>>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> df_native = pa.table({"a": [1, 2, 3], "b": [4, 5, 6]})\n >>> df_nw = nw.from_native(df_native)\n >>> df_nw.with_columns(df_nw["a"].scatter([0, 1], [999, 888])).to_native()\n pyarrow.Table\n a: int64\n b: int64\n ----\n a: [[999,888,3]]\n b: [[4,5,6]]\n """\n return self._with_compliant(\n self._compliant_series.scatter(indices, self._extract_native(values))\n )\n\n @property\n def shape(self) -> tuple[int]:\n """Get the shape of the Series.\n\n Returns:\n A tuple containing the length of the Series.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([1, 2, 3])\n >>> nw.from_native(s_native, series_only=True).shape\n (3,)\n """\n return (self._compliant_series.len(),)\n\n def _extract_native(self, arg: Any) -> Any:\n from narwhals.series import Series\n\n if isinstance(arg, Series):\n return arg._compliant_series\n return arg\n\n def _with_compliant(self, series: Any) -> Self:\n return self.__class__(series, level=self._level)\n\n def pipe(self, function: Callable[[Any], Self], *args: Any, **kwargs: Any) -> Self:\n """Pipe function call.\n\n Returns:\n A new Series with the results of the piped function applied.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series([1, 2, 3])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.pipe(lambda x: x + 2).to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (3,)\n Series: '' [i64]\n [\n 3\n 4\n 5\n ]\n """\n return function(self, *args, **kwargs)\n\n def __repr__(self) -> str: # pragma: no cover\n return generate_repr("Narwhals Series", self.to_native().__repr__())\n\n def __len__(self) -> int:\n return len(self._compliant_series)\n\n def len(self) -> int:\n r"""Return the number of elements in the Series.\n\n Null values count towards the total.\n\n Returns:\n The number of elements in the Series.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[1, 2, None]])\n >>> nw.from_native(s_native, series_only=True).len()\n 3\n """\n return len(self._compliant_series)\n\n @property\n def dtype(self) -> DType:\n """Get the data type of the Series.\n\n Returns:\n The data type of the Series.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([1, 2, 3])\n >>> nw.from_native(s_native, series_only=True).dtype\n Int64\n """\n return self._compliant_series.dtype\n\n @property\n def name(self) -> str:\n """Get the name of the Series.\n\n Returns:\n The name of the Series.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series("foo", [1, 2, 3])\n >>> nw.from_native(s_native, series_only=True).name\n 'foo'\n """\n return self._compliant_series.name\n\n def ewm_mean(\n self,\n *,\n com: float | None = None,\n span: float | None = None,\n half_life: float | None = None,\n alpha: float | None = None,\n adjust: bool = True,\n min_samples: int = 1,\n ignore_nulls: bool = False,\n ) -> Self:\n r"""Compute exponentially-weighted moving average.\n\n Arguments:\n com: Specify decay in terms of center of mass, $\gamma$, with <br> $\alpha = \frac{1}{1+\gamma}\forall\gamma\geq0$\n span: Specify decay in terms of span, $\theta$, with <br> $\alpha = \frac{2}{\theta + 1} \forall \theta \geq 1$\n half_life: Specify decay in terms of half-life, $\tau$, with <br> $\alpha = 1 - \exp \left\{ \frac{ -\ln(2) }{ \tau } \right\} \forall \tau > 0$\n alpha: Specify 
smoothing factor alpha directly, $0 < \alpha \leq 1$.\n adjust: Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings\n\n - When `adjust=True` (the default) the EW function is calculated\n using weights $w_i = (1 - \alpha)^i$\n - When `adjust=False` the EW function is calculated recursively by\n $$\n y_0=x_0\n $$\n $$\n y_t = (1 - \alpha)y_{t - 1} + \alpha x_t\n $$\n min_samples: Minimum number of observations in window required to have a value (otherwise result is null).\n ignore_nulls: Ignore missing values when calculating weights.\n\n - When `ignore_nulls=False` (default), weights are based on absolute\n positions.\n For example, the weights of $x_0$ and $x_2$ used in\n calculating the final weighted average of $[x_0, None, x_2]$ are\n $(1-\alpha)^2$ and $1$ if `adjust=True`, and\n $(1-\alpha)^2$ and $\alpha$ if `adjust=False`.\n - When `ignore_nulls=True`, weights are based\n on relative positions. For example, the weights of\n $x_0$ and $x_2$ used in calculating the final weighted\n average of $[x_0, None, x_2]$ are\n $1-\alpha$ and $1$ if `adjust=True`,\n and $1-\alpha$ and $\alpha$ if `adjust=False`.\n\n Returns:\n Series\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series(name="a", data=[1, 2, 3])\n >>> nw.from_native(s_native, series_only=True).ewm_mean(\n ... com=1, ignore_nulls=False\n ... ).to_native()\n 0 1.000000\n 1 1.666667\n 2 2.428571\n Name: a, dtype: float64\n """\n return self._with_compliant(\n self._compliant_series.ewm_mean(\n com=com,\n span=span,\n half_life=half_life,\n alpha=alpha,\n adjust=adjust,\n min_samples=min_samples,\n ignore_nulls=ignore_nulls,\n )\n )\n\n def cast(self, dtype: IntoDType) -> Self:\n """Cast between data types.\n\n Arguments:\n dtype: Data type that the object will be cast into.\n\n Returns:\n A new Series with the specified data type.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[True, False, True]])\n >>> nw.from_native(s_native, series_only=True).cast(nw.Int64).to_native()\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n 1,\n 0,\n 1\n ]\n ]\n """\n _validate_dtype(dtype)\n return self._with_compliant(self._compliant_series.cast(dtype))\n\n def to_frame(self) -> DataFrame[Any]:\n """Convert to dataframe.\n\n Returns:\n A DataFrame containing this Series as a single column.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series("a", [1, 2])\n >>> nw.from_native(s_native, series_only=True).to_frame().to_native()\n shape: (2, 1)\n ┌─────┐\n │ a │\n │ --- │\n │ i64 │\n ╞═════╡\n │ 1 │\n │ 2 │\n └─────┘\n """\n return self._dataframe(self._compliant_series.to_frame(), level=self._level)\n\n def to_list(self) -> list[Any]:\n """Convert to list.\n\n Notes:\n This function converts to Python scalars. 
It's typically\n more efficient to keep your data in the format native to\n your original dataframe, so we recommend only calling this\n when you absolutely need to.\n\n Returns:\n A list of Python objects.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[1, 2, 3]])\n >>> nw.from_native(s_native, series_only=True).to_list()\n [1, 2, 3]\n """\n return self._compliant_series.to_list()\n\n def mean(self) -> float:\n """Reduce this Series to the mean value.\n\n Returns:\n The average of all elements in the Series.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([1.2, 4.2])\n >>> nw.from_native(s_native, series_only=True).mean()\n np.float64(2.7)\n """\n return self._compliant_series.mean()\n\n def median(self) -> float:\n """Reduce this Series to the median value.\n\n Notes:\n Results might slightly differ across backends due to differences in the underlying algorithms used to compute the median.\n\n Returns:\n The median value of all elements in the Series.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[5, 3, 8]])\n >>> nw.from_native(s_native, series_only=True).median()\n 5.0\n """\n return self._compliant_series.median()\n\n def skew(self) -> float | None:\n """Calculate the sample skewness of the Series.\n\n Returns:\n The sample skewness of the Series.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([1, 1, 2, 10, 100])\n >>> nw.from_native(s_native, series_only=True).skew()\n 1.4724267269058975\n\n Notes:\n The skewness is a measure of the asymmetry of the probability distribution.\n A perfectly symmetric distribution has a skewness of 0.\n """\n return self._compliant_series.skew()\n\n def kurtosis(self) -> float | None:\n """Compute the kurtosis (Fisher's definition) without bias correction.\n\n Kurtosis is the fourth central moment divided by the square of the variance.\n The Fisher's definition is used where 3.0 is subtracted from the result to give 0.0 for a normal distribution.\n\n Returns:\n The kurtosis (Fisher's definition) without bias correction of the column.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([1, 1, 2, 10, 100])\n >>> nw.from_native(s_native, series_only=True).kurtosis()\n 0.2106571340718002\n """\n return self._compliant_series.kurtosis()\n\n def count(self) -> int:\n """Returns the number of non-null elements in the Series.\n\n Returns:\n The number of non-null elements in the Series.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[1, 2, None]])\n >>> nw.from_native(s_native, series_only=True).count()\n 2\n """\n return self._compliant_series.count()\n\n def any(self) -> bool:\n """Return whether any of the values in the Series are True.\n\n If there are no non-null elements, the result is `False`.\n\n Notes:\n Only works on Series of data type Boolean.\n\n Returns:\n A boolean indicating if any values in the Series are True.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([False, True, False])\n >>> nw.from_native(s_native, series_only=True).any()\n np.True_\n """\n return self._compliant_series.any()\n\n def all(self) -> bool:\n """Return whether all values in the Series are True.\n\n If there are no non-null elements, the result is `True`.\n\n Returns:\n A boolean 
indicating if all values in the Series are True.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[False, True, False]])\n >>> nw.from_native(s_native, series_only=True).all()\n False\n """\n return self._compliant_series.all()\n\n def min(self) -> Any:\n """Get the minimal value in this Series.\n\n Returns:\n The minimum value in the Series.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([1, 2, 3])\n >>> nw.from_native(s_native, series_only=True).min()\n 1\n """\n return self._compliant_series.min()\n\n def max(self) -> Any:\n """Get the maximum value in this Series.\n\n Returns:\n The maximum value in the Series.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([1, 2, 3])\n >>> nw.from_native(s_native, series_only=True).max()\n np.int64(3)\n """\n return self._compliant_series.max()\n\n def arg_min(self) -> int:\n """Returns the index of the minimum value.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[1, 2, 3]])\n >>> nw.from_native(s_native, series_only=True).arg_min()\n 0\n """\n return self._compliant_series.arg_min()\n\n def arg_max(self) -> int:\n """Returns the index of the maximum value.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([1, 2, 3])\n >>> nw.from_native(s_native, series_only=True).arg_max()\n 2\n """\n return self._compliant_series.arg_max()\n\n def sum(self) -> float:\n """Reduce this Series to the sum value.\n\n If there are no non-null elements, the result is zero.\n\n Returns:\n The sum of all elements in the Series.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[1, 2, 3]])\n >>> nw.from_native(s_native, series_only=True).sum()\n 6\n """\n return self._compliant_series.sum()\n\n def std(self, *, ddof: int = 1) -> float:\n """Get the standard deviation of this Series.\n\n Arguments:\n ddof: "Delta Degrees of Freedom": the divisor used in the calculation is N - ddof,\n where N represents the number of elements.\n\n Returns:\n The standard deviation of all elements in the Series.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([1, 2, 3])\n >>> nw.from_native(s_native, series_only=True).std()\n 1.0\n """\n return self._compliant_series.std(ddof=ddof)\n\n def var(self, *, ddof: int = 1) -> float:\n """Get the variance of this Series.\n\n Arguments:\n ddof: "Delta Degrees of Freedom": the divisor used in the calculation is N - ddof,\n where N represents the number of elements.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[1, 2, 3]])\n >>> nw.from_native(s_native, series_only=True).var()\n 1.0\n """\n return self._compliant_series.var(ddof=ddof)\n\n def clip(\n self,\n lower_bound: Self | NumericLiteral | TemporalLiteral | None = None,\n upper_bound: Self | NumericLiteral | TemporalLiteral | None = None,\n ) -> Self:\n r"""Clip values in the Series.\n\n Arguments:\n lower_bound: Lower bound value.\n upper_bound: Upper bound value.\n\n Returns:\n A new Series with values clipped to the specified bounds.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([-1, 1, -3, 3, -5, 5])\n >>> nw.from_native(s_native, series_only=True).clip(-1, 3).to_native()\n 0 -1\n 1 1\n 2 
-1\n 3 3\n 4 -1\n 5 3\n dtype: int64\n """\n return self._with_compliant(\n self._compliant_series.clip(\n lower_bound=self._extract_native(lower_bound),\n upper_bound=self._extract_native(upper_bound),\n )\n )\n\n def is_in(self, other: Any) -> Self:\n """Check if the elements of this Series are in the other sequence.\n\n Arguments:\n other: Sequence of primitive type.\n\n Returns:\n A new Series with boolean values indicating if the elements are in the other sequence.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[1, 2, 3]])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.is_in([3, 2, 8]).to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n false,\n true,\n true\n ]\n ]\n """\n return self._with_compliant(\n self._compliant_series.is_in(to_native(other, pass_through=True))\n )\n\n def arg_true(self) -> Self:\n """Find elements where boolean Series is True.\n\n Returns:\n A new Series with the indices of elements that are True.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([1, None, None, 2])\n >>> nw.from_native(\n ... s_native, series_only=True\n ... ).is_null().arg_true().to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (2,)\n Series: '' [u32]\n [\n 1\n 2\n ]\n """\n return self._with_compliant(self._compliant_series.arg_true())\n\n def drop_nulls(self) -> Self:\n """Drop null values.\n\n Notes:\n pandas handles null values differently from Polars and PyArrow.\n See [null_handling](../concepts/null_handling.md/)\n for reference.\n\n Returns:\n A new Series with null values removed.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([2, 4, None, 3, 5])\n >>> nw.from_native(s_native, series_only=True).drop_nulls().to_native()\n 0 2.0\n 1 4.0\n 3 3.0\n 4 5.0\n dtype: float64\n """\n return self._with_compliant(self._compliant_series.drop_nulls())\n\n def abs(self) -> Self:\n """Calculate the absolute value of each element.\n\n Returns:\n A new Series with the absolute values of the original elements.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[2, -4, 3]])\n >>> nw.from_native(\n ... s_native, series_only=True\n ... ).abs().to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n 2,\n 4,\n 3\n ]\n ]\n """\n return self._with_compliant(self._compliant_series.abs())\n\n def cum_sum(self, *, reverse: bool = False) -> Self:\n """Calculate the cumulative sum.\n\n Arguments:\n reverse: reverse the operation\n\n Returns:\n A new Series with the cumulative sum of non-null values.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([2, 4, 3])\n >>> nw.from_native(s_native, series_only=True).cum_sum().to_native()\n 0 2\n 1 6\n 2 9\n dtype: int64\n """\n return self._with_compliant(self._compliant_series.cum_sum(reverse=reverse))\n\n def unique(self, *, maintain_order: bool = False) -> Self:\n """Returns unique values of the series.\n\n Arguments:\n maintain_order: Keep the same order as the original series. This may be more\n expensive to compute. 
Setting this to `True` prevents running\n on the streaming engine for Polars.\n\n Returns:\n A new Series with duplicate values removed.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([2, 4, 4, 6])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.unique(\n ... maintain_order=True\n ... ).to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (3,)\n Series: '' [i64]\n [\n 2\n 4\n 6\n ]\n """\n return self._with_compliant(\n self._compliant_series.unique(maintain_order=maintain_order)\n )\n\n def diff(self) -> Self:\n """Calculate the difference with the previous element, for each element.\n\n Notes:\n pandas may change the dtype here, for example when introducing missing\n values in an integer column. To ensure that the dtype doesn't change,\n you may want to use `fill_null` and `cast`. For example, to calculate\n the diff and fill missing values with `0` in an Int64 column, you could\n do:\n\n s.diff().fill_null(0).cast(nw.Int64)\n\n Returns:\n A new Series with the difference between each element and its predecessor.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[2, 4, 3]])\n >>> nw.from_native(\n ... s_native, series_only=True\n ... ).diff().to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n null,\n 2,\n -1\n ]\n ]\n """\n return self._with_compliant(self._compliant_series.diff())\n\n def shift(self, n: int) -> Self:\n """Shift values by `n` positions.\n\n Arguments:\n n: Number of indices to shift forward. If a negative value is passed,\n values are shifted in the opposite direction instead.\n\n Returns:\n A new Series with values shifted by n positions.\n\n Notes:\n pandas may change the dtype here, for example when introducing missing\n values in an integer column. To ensure that the dtype doesn't change,\n you may want to use `fill_null` and `cast`. For example, to shift\n and fill missing values with `0` in an Int64 column, you could\n do:\n\n s.shift(1).fill_null(0).cast(nw.Int64)\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([2, 4, 3])\n >>> nw.from_native(s_native, series_only=True).shift(1).to_native()\n 0 NaN\n 1 2.0\n 2 4.0\n dtype: float64\n """\n ensure_type(n, int, param_name="n")\n\n return self._with_compliant(self._compliant_series.shift(n))\n\n def sample(\n self,\n n: int | None = None,\n *,\n fraction: float | None = None,\n with_replacement: bool = False,\n seed: int | None = None,\n ) -> Self:\n """Sample randomly from this Series.\n\n Arguments:\n n: Number of items to return. Cannot be used with fraction.\n fraction: Fraction of items to return. Cannot be used with n.\n with_replacement: Allow values to be sampled more than once.\n seed: Seed for the random number generator. If set to None (default), a random\n seed is generated for each sample operation.\n\n Returns:\n A new Series containing randomly sampled values from the original Series.\n\n Notes:\n The `sample` method returns a Series with a specified number of\n randomly selected items chosen from this Series.\n The results are not consistent across libraries.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([1, 2, 3, 4])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.sample(\n ... fraction=1.0, with_replacement=True\n ... 
).to_native() # doctest: +SKIP\n shape: (4,)\n Series: '' [i64]\n [\n 1\n 4\n 3\n 4\n ]\n """\n return self._with_compliant(\n self._compliant_series.sample(\n n=n, fraction=fraction, with_replacement=with_replacement, seed=seed\n )\n )\n\n def alias(self, name: str) -> Self:\n """Rename the Series.\n\n Notes:\n This method is very cheap, but does not guarantee that data\n will be copied. For example:\n\n ```python\n s1: nw.Series\n s2 = s1.alias("foo")\n arr = s2.to_numpy()\n arr[0] = 999\n ```\n\n may (depending on the backend, and on the version) result in\n `s1`'s data being modified. We recommend:\n\n - if you need to alias an object and don't need the original\n one around any more, just use `alias` without worrying about it.\n - if you were expecting `alias` to copy data, then explicitly call\n `.clone` before calling `alias`.\n\n Arguments:\n name: The new name.\n\n Returns:\n A new Series with the updated name.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([1, 2, 3], name="foo")\n >>> nw.from_native(s_native, series_only=True).alias("bar").to_native()\n 0 1\n 1 2\n 2 3\n Name: bar, dtype: int64\n """\n return self._with_compliant(self._compliant_series.alias(name=name))\n\n def rename(self, name: str) -> Self:\n """Rename the Series.\n\n Alias for `Series.alias()`.\n\n Notes:\n This method is very cheap, but does not guarantee that data\n will be copied. For example:\n\n ```python\n s1: nw.Series\n s2 = s1.rename("foo")\n arr = s2.to_numpy()\n arr[0] = 999\n ```\n\n may (depending on the backend, and on the version) result in\n `s1`'s data being modified. We recommend:\n\n - if you need to rename an object and don't need the original\n one around any more, just use `rename` without worrying about it.\n - if you were expecting `rename` to copy data, then explicitly call\n `.clone` before calling `rename`.\n\n Arguments:\n name: The new name.\n\n Returns:\n A new Series with the updated name.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series("foo", [1, 2, 3])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.rename("bar").to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (3,)\n Series: 'bar' [i64]\n [\n 1\n 2\n 3\n ]\n """\n return self.alias(name=name)\n\n def replace_strict(\n self,\n old: Sequence[Any] | Mapping[Any, Any],\n new: Sequence[Any] | None = None,\n *,\n return_dtype: IntoDType | None = None,\n ) -> Self:\n """Replace all values by different values.\n\n This function must replace all non-null input values (else it raises an error).\n\n Arguments:\n old: Sequence of values to replace. It also accepts a mapping of values to\n their replacement as syntactic sugar for\n `replace_strict(old=list(mapping.keys()), new=list(mapping.values()))`.\n new: Sequence of values to replace by. Length must match the length of `old`.\n return_dtype: The data type of the resulting expression. If set to `None`\n (default), the data type is determined automatically based on the other\n inputs.\n\n Returns:\n A new Series with values replaced according to the mapping.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([3, 0, 1, 2], name="a")\n >>> nw.from_native(s_native, series_only=True).replace_strict(\n ... [0, 1, 2, 3], ["zero", "one", "two", "three"], return_dtype=nw.String\n ... 
).to_native()\n 0 three\n 1 zero\n 2 one\n 3 two\n Name: a, dtype: object\n """\n if new is None:\n if not isinstance(old, Mapping):\n msg = "`new` argument is required if `old` argument is not a Mapping type"\n raise TypeError(msg)\n\n new = list(old.values())\n old = list(old.keys())\n\n return self._with_compliant(\n self._compliant_series.replace_strict(old, new, return_dtype=return_dtype)\n )\n\n def sort(self, *, descending: bool = False, nulls_last: bool = False) -> Self:\n """Sort this Series. Place null values first.\n\n Arguments:\n descending: Sort in descending order.\n nulls_last: Place null values last instead of first.\n\n Returns:\n A new sorted Series.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([5, None, 1, 2])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.sort(descending=True).to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (4,)\n Series: '' [i64]\n [\n null\n 5\n 2\n 1\n ]\n """\n return self._with_compliant(\n self._compliant_series.sort(descending=descending, nulls_last=nulls_last)\n )\n\n def is_null(self) -> Self:\n """Returns a boolean Series indicating which values are null.\n\n Notes:\n pandas handles null values differently from Polars and PyArrow.\n See [null_handling](../concepts/null_handling.md/)\n for reference.\n\n Returns:\n A boolean Series indicating which values are null.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[1, 2, None]])\n >>> nw.from_native(\n ... s_native, series_only=True\n ... ).is_null().to_native() # doctest:+ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n false,\n false,\n true\n ]\n ]\n """\n return self._with_compliant(self._compliant_series.is_null())\n\n def is_nan(self) -> Self:\n """Returns a boolean Series indicating which values are NaN.\n\n Returns:\n A boolean Series indicating which values are NaN.\n\n Notes:\n pandas handles null values differently from Polars and PyArrow.\n See [null_handling](../concepts/null_handling.md/)\n for reference.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([0.0, None, 2.0], dtype="Float64")\n >>> nw.from_native(s_native, series_only=True).is_nan().to_native()\n 0 False\n 1 <NA>\n 2 False\n dtype: boolean\n """\n return self._with_compliant(self._compliant_series.is_nan())\n\n def fill_null(\n self,\n value: Self | NonNestedLiteral = None,\n strategy: FillNullStrategy | None = None,\n limit: int | None = None,\n ) -> Self:\n """Fill null values using the specified value.\n\n Arguments:\n value: Value used to fill null values.\n strategy: Strategy used to fill null values.\n limit: Number of consecutive null values to fill when using the 'forward' or 'backward' strategy.\n\n Notes:\n - pandas handles null values differently from other libraries.\n See [null_handling](../concepts/null_handling.md/)\n for reference.\n - For pandas Series of `object` dtype, `fill_null` will not automatically change the\n Series' dtype as pandas used to do. 
Explicitly call `cast` if you want the dtype to change.\n\n Returns:\n A new Series with null values filled according to the specified value or strategy.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([1, 2, None])\n >>>\n >>> nw.from_native(s_native, series_only=True).fill_null(5).to_native()\n 0 1.0\n 1 2.0\n 2 5.0\n dtype: float64\n\n Or using a strategy:\n\n >>> nw.from_native(s_native, series_only=True).fill_null(\n ... strategy="forward", limit=1\n ... ).to_native()\n 0 1.0\n 1 2.0\n 2 2.0\n dtype: float64\n """\n if value is not None and strategy is not None:\n msg = "cannot specify both `value` and `strategy`"\n raise ValueError(msg)\n if value is None and strategy is None:\n msg = "must specify either a fill `value` or `strategy`"\n raise ValueError(msg)\n if strategy is not None and strategy not in {"forward", "backward"}:\n msg = f"strategy not supported: {strategy}"\n raise ValueError(msg)\n return self._with_compliant(\n self._compliant_series.fill_null(\n value=self._extract_native(value), strategy=strategy, limit=limit\n )\n )\n\n def is_between(\n self,\n lower_bound: Any | Self,\n upper_bound: Any | Self,\n closed: ClosedInterval = "both",\n ) -> Self:\n """Get a boolean mask of the values that are between the given lower/upper bounds.\n\n Arguments:\n lower_bound: Lower bound value.\n upper_bound: Upper bound value.\n closed: Define which sides of the interval are closed (inclusive).\n\n Notes:\n If the value of the `lower_bound` is greater than that of the `upper_bound`,\n then the values will be False, as no value can satisfy the condition.\n\n Returns:\n A boolean Series indicating which values are between the given bounds.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[1, 2, 3, 4, 5]])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.is_between(2, 4, "right").to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n false,\n false,\n true,\n true,\n false\n ]\n ]\n """\n return self._with_compliant(\n self._compliant_series.is_between(\n self._extract_native(lower_bound),\n self._extract_native(upper_bound),\n closed=closed,\n )\n )\n\n def n_unique(self) -> int:\n """Count the number of unique values.\n\n Returns:\n Number of unique values in the Series.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([1, 2, 2, 3])\n >>> nw.from_native(s_native, series_only=True).n_unique()\n 3\n """\n return self._compliant_series.n_unique()\n\n def to_numpy(self) -> _1DArray:\n """Convert to numpy.\n\n Returns:\n NumPy ndarray representation of the Series.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([1, 2, 3], name="a")\n >>> nw.from_native(s_native, series_only=True).to_numpy()\n array([1, 2, 3]...)\n """\n return self._compliant_series.to_numpy(None, copy=None)\n\n def to_pandas(self) -> pd.Series[Any]:\n """Convert to pandas Series.\n\n Returns:\n A pandas Series containing the data from this Series.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series("a", [1, 2, 3])\n >>> nw.from_native(s_native, series_only=True).to_pandas()\n 0 1\n 1 2\n 2 3\n Name: a, dtype: int64\n """\n return self._compliant_series.to_pandas()\n\n def to_polars(self) -> pl.Series:\n """Convert to polars Series.\n\n Returns:\n A polars Series containing the data from this Series.\n\n Examples:\n 
>>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[1, 2, 3]])\n >>> nw.from_native(\n ... s_native, series_only=True\n ... ).to_polars() # doctest: +NORMALIZE_WHITESPACE\n shape: (3,)\n Series: '' [i64]\n [\n 1\n 2\n 3\n ]\n """\n return self._compliant_series.to_polars()\n\n def __add__(self, other: object) -> Self:\n return self._with_compliant(\n self._compliant_series.__add__(self._extract_native(other))\n )\n\n def __radd__(self, other: object) -> Self:\n return self._with_compliant(\n self._compliant_series.__radd__(self._extract_native(other))\n )\n\n def __sub__(self, other: object) -> Self:\n return self._with_compliant(\n self._compliant_series.__sub__(self._extract_native(other))\n )\n\n def __rsub__(self, other: object) -> Self:\n return self._with_compliant(\n self._compliant_series.__rsub__(self._extract_native(other))\n )\n\n def __mul__(self, other: object) -> Self:\n return self._with_compliant(\n self._compliant_series.__mul__(self._extract_native(other))\n )\n\n def __rmul__(self, other: object) -> Self:\n return self._with_compliant(\n self._compliant_series.__rmul__(self._extract_native(other))\n )\n\n def __truediv__(self, other: object) -> Self:\n return self._with_compliant(\n self._compliant_series.__truediv__(self._extract_native(other))\n )\n\n def __rtruediv__(self, other: object) -> Self:\n return self._with_compliant(\n self._compliant_series.__rtruediv__(self._extract_native(other))\n )\n\n def __floordiv__(self, other: object) -> Self:\n return self._with_compliant(\n self._compliant_series.__floordiv__(self._extract_native(other))\n )\n\n def __rfloordiv__(self, other: object) -> Self:\n return self._with_compliant(\n self._compliant_series.__rfloordiv__(self._extract_native(other))\n )\n\n def __pow__(self, other: object) -> Self:\n return self._with_compliant(\n self._compliant_series.__pow__(self._extract_native(other))\n )\n\n def __rpow__(self, other: object) -> Self:\n return self._with_compliant(\n self._compliant_series.__rpow__(self._extract_native(other))\n )\n\n def __mod__(self, other: object) -> Self:\n return self._with_compliant(\n self._compliant_series.__mod__(self._extract_native(other))\n )\n\n def __rmod__(self, other: object) -> Self:\n return self._with_compliant(\n self._compliant_series.__rmod__(self._extract_native(other))\n )\n\n def __eq__(self, other: object) -> Self: # type: ignore[override]\n return self._with_compliant(\n self._compliant_series.__eq__(self._extract_native(other))\n )\n\n def __ne__(self, other: object) -> Self: # type: ignore[override]\n return self._with_compliant(\n self._compliant_series.__ne__(self._extract_native(other))\n )\n\n def __gt__(self, other: Any) -> Self:\n return self._with_compliant(\n self._compliant_series.__gt__(self._extract_native(other))\n )\n\n def __ge__(self, other: Any) -> Self:\n return self._with_compliant(\n self._compliant_series.__ge__(self._extract_native(other))\n )\n\n def __lt__(self, other: Any) -> Self:\n return self._with_compliant(\n self._compliant_series.__lt__(self._extract_native(other))\n )\n\n def __le__(self, other: Any) -> Self:\n return self._with_compliant(\n self._compliant_series.__le__(self._extract_native(other))\n )\n\n def __and__(self, other: Any) -> Self:\n return self._with_compliant(\n self._compliant_series.__and__(self._extract_native(other))\n )\n\n def __rand__(self, other: Any) -> Self:\n return self._with_compliant(\n self._compliant_series.__rand__(self._extract_native(other))\n )\n\n def 
__or__(self, other: Any) -> Self:\n return self._with_compliant(\n self._compliant_series.__or__(self._extract_native(other))\n )\n\n def __ror__(self, other: Any) -> Self:\n return self._with_compliant(\n self._compliant_series.__ror__(self._extract_native(other))\n )\n\n # unary\n def __invert__(self) -> Self:\n return self._with_compliant(self._compliant_series.__invert__())\n\n def filter(self, predicate: Any) -> Self:\n """Filter elements in the Series based on a condition.\n\n Returns:\n A new Series with elements that satisfy the condition.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([4, 10, 15, 34, 50])\n >>> s_nw = nw.from_native(s_native, series_only=True)\n >>> s_nw.filter(s_nw > 10).to_native()\n 2 15\n 3 34\n 4 50\n dtype: int64\n """\n return self._with_compliant(\n self._compliant_series.filter(self._extract_native(predicate))\n )\n\n # --- descriptive ---\n def is_duplicated(self) -> Self:\n r"""Get a mask of all duplicated rows in the Series.\n\n Returns:\n A new Series with boolean values indicating duplicated rows.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[1, 2, 3, 1]])\n >>> nw.from_native(\n ... s_native, series_only=True\n ... ).is_duplicated().to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n true,\n false,\n false,\n true\n ]\n ]\n """\n return ~self.is_unique()\n\n def is_empty(self) -> bool:\n r"""Check if the series is empty.\n\n Returns:\n A boolean indicating if the series is empty.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([1, 2, 3])\n >>> s_nw = nw.from_native(s_native, series_only=True)\n\n >>> s_nw.is_empty()\n False\n >>> s_nw.filter(s_nw > 10).is_empty()\n True\n """\n return self._compliant_series.len() == 0\n\n def is_unique(self) -> Self:\n r"""Get a mask of all unique rows in the Series.\n\n Returns:\n A new Series with boolean values indicating unique rows.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([1, 2, 3, 1])\n >>> nw.from_native(s_native, series_only=True).is_unique().to_native()\n 0 False\n 1 True\n 2 True\n 3 False\n dtype: bool\n """\n return self._with_compliant(self._compliant_series.is_unique())\n\n def null_count(self) -> int:\n r"""Count the number of null values.\n\n Notes:\n pandas handles null values differently from Polars and PyArrow.\n See [null_handling](../concepts/null_handling.md/)\n for reference.\n\n Returns:\n The number of null values in the Series.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[1, None, None]])\n >>> nw.from_native(s_native, series_only=True).null_count()\n 2\n """\n return self._compliant_series.null_count()\n\n def is_first_distinct(self) -> Self:\n r"""Return a boolean mask indicating the first occurrence of each distinct value.\n\n Returns:\n A new Series with boolean values indicating the first occurrence of each distinct value.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([1, 1, 2, 3, 2])\n >>> nw.from_native(\n ... s_native, series_only=True\n ... 
).is_first_distinct().to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (5,)\n Series: '' [bool]\n [\n true\n false\n true\n true\n false\n ]\n """\n return self._with_compliant(self._compliant_series.is_first_distinct())\n\n def is_last_distinct(self) -> Self:\n r"""Return a boolean mask indicating the last occurrence of each distinct value.\n\n Returns:\n A new Series with boolean values indicating the last occurrence of each distinct value.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([1, 1, 2, 3, 2])\n >>> nw.from_native(s_native, series_only=True).is_last_distinct().to_native()\n 0 False\n 1 True\n 2 False\n 3 True\n 4 True\n dtype: bool\n """\n return self._with_compliant(self._compliant_series.is_last_distinct())\n\n def is_sorted(self, *, descending: bool = False) -> bool:\n r"""Check if the Series is sorted.\n\n Arguments:\n descending: Check if the Series is sorted in descending order.\n\n Returns:\n A boolean indicating if the Series is sorted.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[3, 2, 1]])\n >>> s_nw = nw.from_native(s_native, series_only=True)\n\n >>> s_nw.is_sorted(descending=False)\n False\n\n >>> s_nw.is_sorted(descending=True)\n True\n """\n return self._compliant_series.is_sorted(descending=descending)\n\n def value_counts(\n self,\n *,\n sort: bool = False,\n parallel: bool = False,\n name: str | None = None,\n normalize: bool = False,\n ) -> DataFrame[Any]:\n r"""Count the occurrences of unique values.\n\n Arguments:\n sort: Sort the output by count in descending order. If set to False (default),\n the order of the output is random.\n parallel: Execute the computation in parallel. Used for Polars only.\n name: Give the resulting count column a specific name; if `normalize` is True\n defaults to "proportion", otherwise defaults to "count".\n normalize: If true gives relative frequencies of the unique values\n\n Returns:\n A DataFrame with two columns\n\n - The original values as first column\n - Either count or proportion as second column, depending on normalize parameter.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([1, 1, 2, 3, 2], name="s")\n >>> nw.from_native(s_native, series_only=True).value_counts(\n ... sort=True\n ... ).to_native()\n s count\n 0 1 2\n 1 2 2\n 2 3 1\n """\n return self._dataframe(\n self._compliant_series.value_counts(\n sort=sort, parallel=parallel, name=name, normalize=normalize\n ),\n level=self._level,\n )\n\n def quantile(\n self, quantile: float, interpolation: RollingInterpolationMethod\n ) -> float:\n """Get quantile value of the series.\n\n Note:\n pandas and Polars may have implementation differences for a given interpolation method.\n\n Arguments:\n quantile: Quantile between 0.0 and 1.0.\n interpolation: Interpolation method.\n\n Returns:\n The quantile value.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series(list(range(50)))\n >>> s_nw = nw.from_native(s_native, series_only=True)\n >>> [\n ... s_nw.quantile(quantile=q, interpolation="nearest")\n ... for q in (0.1, 0.25, 0.5, 0.75, 0.9)\n ... ]\n [5.0, 12.0, 25.0, 37.0, 44.0]\n """\n return self._compliant_series.quantile(\n quantile=quantile, interpolation=interpolation\n )\n\n def zip_with(self, mask: Self, other: Self) -> Self:\n """Take values from self or other based on the given mask.\n\n Where mask evaluates true, take values from self. 
Where mask evaluates false,\n take values from other.\n\n Arguments:\n mask: Boolean Series\n other: Series of same type.\n\n Returns:\n A new Series with values selected from self or other based on the mask.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> data_native = pa.chunked_array([[1, 2, 3, 4, 5]])\n >>> other_native = pa.chunked_array([[5, 4, 3, 2, 1]])\n >>> mask_native = pa.chunked_array([[True, False, True, False, True]])\n >>>\n >>> data_nw = nw.from_native(data_native, series_only=True)\n >>> other_nw = nw.from_native(other_native, series_only=True)\n >>> mask_nw = nw.from_native(mask_native, series_only=True)\n >>>\n >>> data_nw.zip_with(mask_nw, other_nw).to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n 1,\n 4,\n 3,\n 2,\n 5\n ]\n ]\n """\n return self._with_compliant(\n self._compliant_series.zip_with(\n self._extract_native(mask), self._extract_native(other)\n )\n )\n\n def item(self, index: int | None = None) -> Any:\n r"""Return the Series as a scalar, or return the element at the given index.\n\n If no index is provided, this is equivalent to `s[0]`, with a check\n that the shape is (1,). With an index, this is equivalent to `s[index]`.\n\n Returns:\n The scalar value of the Series or the element at the given index.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> nw.from_native(pl.Series("a", [1]), series_only=True).item()\n 1\n\n >>> nw.from_native(pl.Series("a", [9, 8, 7]), series_only=True).item(-1)\n 7\n """\n return self._compliant_series.item(index=index)\n\n def head(self, n: int = 10) -> Self:\n r"""Get the first `n` rows.\n\n Arguments:\n n: Number of rows to return.\n\n Returns:\n A new Series containing the first n rows.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series(list(range(10)))\n >>> nw.from_native(s_native, series_only=True).head(3).to_native()\n 0 0\n 1 1\n 2 2\n dtype: int64\n """\n return self._with_compliant(self._compliant_series.head(n))\n\n def tail(self, n: int = 10) -> Self:\n r"""Get the last `n` rows.\n\n Arguments:\n n: Number of rows to return.\n\n Returns:\n A new Series with the last n rows.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([list(range(10))])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.tail(3).to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n 7,\n 8,\n 9\n ]\n ]\n """\n return self._with_compliant(self._compliant_series.tail(n))\n\n def round(self, decimals: int = 0) -> Self:\n r"""Round underlying floating point data by `decimals` digits.\n\n Arguments:\n decimals: Number of decimals to round by.\n\n Returns:\n A new Series with rounded values.\n\n Notes:\n For values exactly halfway between rounded decimal values pandas behaves differently than Polars and Arrow.\n\n pandas rounds to the nearest even value (e.g. -0.5 and 0.5 round to 0.0, 1.5 and 2.5 round to 2.0, 3.5 and\n 4.5 to 4.0, etc..).\n\n Polars and Arrow round away from 0 (e.g. 
-0.5 to -1.0, 0.5 to 1.0, 1.5 to 2.0, 2.5 to 3.0, etc..).\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([1.12345, 2.56789, 3.901234])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.round(1).to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (3,)\n Series: '' [f64]\n [\n 1.1\n 2.6\n 3.9\n ]\n """\n return self._with_compliant(self._compliant_series.round(decimals))\n\n def to_dummies(\n self, *, separator: str = "_", drop_first: bool = False\n ) -> DataFrame[Any]:\n r"""Get dummy/indicator variables.\n\n Arguments:\n separator: Separator/delimiter used when generating column names.\n drop_first: Remove the first category from the variable being encoded.\n\n Returns:\n A new DataFrame containing the dummy/indicator variables.\n\n Notes:\n pandas and Polars handle null values differently. Polars distinguishes\n between NaN and Null, whereas pandas doesn't.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([1, 2, 3], name="a")\n >>> s_nw = nw.from_native(s_native, series_only=True)\n\n >>> s_nw.to_dummies(drop_first=False).to_native()\n a_1 a_2 a_3\n 0 1 0 0\n 1 0 1 0\n 2 0 0 1\n\n >>> s_nw.to_dummies(drop_first=True).to_native()\n a_2 a_3\n 0 0 0\n 1 1 0\n 2 0 1\n """\n return self._dataframe(\n self._compliant_series.to_dummies(separator=separator, drop_first=drop_first),\n level=self._level,\n )\n\n def gather_every(self, n: int, offset: int = 0) -> Self:\n r"""Take every nth value in the Series and return as new Series.\n\n Arguments:\n n: Gather every *n*-th row.\n offset: Starting index.\n\n Returns:\n A new Series with every nth value starting from the offset.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[1, 2, 3, 4]])\n >>> nw.from_native(s_native, series_only=True).gather_every(\n ... n=2, offset=1\n ... ).to_native() # doctest:+ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n 2,\n 4\n ]\n ]\n """\n return self._with_compliant(\n self._compliant_series.gather_every(n=n, offset=offset)\n )\n\n def to_arrow(self) -> pa.Array[Any]:\n r"""Convert to arrow.\n\n Returns:\n A PyArrow Array containing the data from the Series.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([1, 2, 3, 4])\n >>> nw.from_native(\n ... s_native, series_only=True\n ... ).to_arrow() # doctest:+NORMALIZE_WHITESPACE\n <pyarrow.lib.Int64Array object at ...>\n [\n 1,\n 2,\n 3,\n 4\n ]\n """\n return self._compliant_series.to_arrow()\n\n def mode(self) -> Self:\n r"""Compute the most occurring value(s).\n\n Can return multiple values.\n\n Returns:\n A new Series containing the mode(s) (values that appear most frequently).\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series([1, 1, 2, 2, 3])\n >>> nw.from_native(s_native, series_only=True).mode().sort().to_native()\n 0 1\n 1 2\n dtype: int64\n """\n return self._with_compliant(self._compliant_series.mode())\n\n def is_finite(self) -> Self:\n """Returns a boolean Series indicating which values are finite.\n\n Warning:\n Different backends handle null values differently. 
`is_finite` will return\n False for NaN and Nulls in the Dask and pandas non-nullable backends, while\n for Polars, PyArrow and pandas nullable backends null values are kept as such.\n\n Returns:\n A new Series of `Boolean` data type.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[float("nan"), float("inf"), 2.0, None]])\n >>> nw.from_native(\n ... s_native, series_only=True\n ... ).is_finite().to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n false,\n false,\n true,\n null\n ]\n ]\n """\n return self._with_compliant(self._compliant_series.is_finite())\n\n def cum_count(self, *, reverse: bool = False) -> Self:\n r"""Return the cumulative count of the non-null values in the series.\n\n Arguments:\n reverse: reverse the operation\n\n Returns:\n A new Series with the cumulative count of non-null values.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series(["x", "k", None, "d"])\n >>> nw.from_native(s_native, series_only=True).cum_count(\n ... reverse=True\n ... ).to_native() # doctest:+NORMALIZE_WHITESPACE\n shape: (4,)\n Series: '' [u32]\n [\n 3\n 2\n 1\n 1\n ]\n """\n return self._with_compliant(self._compliant_series.cum_count(reverse=reverse))\n\n def cum_min(self, *, reverse: bool = False) -> Self:\n r"""Return the cumulative min of the non-null values in the series.\n\n Arguments:\n reverse: reverse the operation\n\n Returns:\n A new Series with the cumulative min of non-null values.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([3, 1, None, 2])\n >>> nw.from_native(s_native, series_only=True).cum_min().to_native()\n 0 3.0\n 1 1.0\n 2 NaN\n 3 1.0\n dtype: float64\n """\n return self._with_compliant(self._compliant_series.cum_min(reverse=reverse))\n\n def cum_max(self, *, reverse: bool = False) -> Self:\n r"""Return the cumulative max of the non-null values in the series.\n\n Arguments:\n reverse: reverse the operation\n\n Returns:\n A new Series with the cumulative max of non-null values.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[1, 3, None, 2]])\n >>> nw.from_native(\n ... s_native, series_only=True\n ... ).cum_max().to_native() # doctest:+ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n 1,\n 3,\n null,\n 3\n ]\n ]\n\n """\n return self._with_compliant(self._compliant_series.cum_max(reverse=reverse))\n\n def cum_prod(self, *, reverse: bool = False) -> Self:\n r"""Return the cumulative product of the non-null values in the series.\n\n Arguments:\n reverse: reverse the operation\n\n Returns:\n A new Series with the cumulative product of non-null values.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([1, 3, None, 2])\n >>> nw.from_native(\n ... s_native, series_only=True\n ... ).cum_prod().to_native() # doctest:+NORMALIZE_WHITESPACE\n shape: (4,)\n Series: '' [i64]\n [\n 1\n 3\n null\n 6\n ]\n """\n return self._with_compliant(self._compliant_series.cum_prod(reverse=reverse))\n\n def rolling_sum(\n self, window_size: int, *, min_samples: int | None = None, center: bool = False\n ) -> Self:\n """Apply a rolling sum (moving sum) over the values.\n\n A window of length `window_size` will traverse the values. 
The resulting values\n will be aggregated to their sum.\n\n The window at a given row will include the row itself and the `window_size - 1`\n elements before it.\n\n Arguments:\n window_size: The length of the window in number of elements. It must be a\n strictly positive integer.\n min_samples: The number of values in the window that should be non-null before\n computing a result. If set to `None` (default), it will be set equal to\n `window_size`. If provided, it must be a strictly positive integer, and\n less than or equal to `window_size`\n center: Set the labels at the center of the window.\n\n Returns:\n A new series.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([1.0, 2.0, 3.0, 4.0])\n >>> nw.from_native(s_native, series_only=True).rolling_sum(\n ... window_size=2\n ... ).to_native()\n 0 NaN\n 1 3.0\n 2 5.0\n 3 7.0\n dtype: float64\n """\n window_size, min_samples_int = _validate_rolling_arguments(\n window_size=window_size, min_samples=min_samples\n )\n\n if len(self) == 0: # pragma: no cover\n return self\n\n return self._with_compliant(\n self._compliant_series.rolling_sum(\n window_size=window_size, min_samples=min_samples_int, center=center\n )\n )\n\n def rolling_mean(\n self, window_size: int, *, min_samples: int | None = None, center: bool = False\n ) -> Self:\n """Apply a rolling mean (moving mean) over the values.\n\n A window of length `window_size` will traverse the values. The resulting values\n will be aggregated to their mean.\n\n The window at a given row will include the row itself and the `window_size - 1`\n elements before it.\n\n Arguments:\n window_size: The length of the window in number of elements. It must be a\n strictly positive integer.\n min_samples: The number of values in the window that should be non-null before\n computing a result. If set to `None` (default), it will be set equal to\n `window_size`. If provided, it must be a strictly positive integer, and\n less than or equal to `window_size`\n center: Set the labels at the center of the window.\n\n Returns:\n A new series.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[1.0, 2.0, 3.0, 4.0]])\n >>> nw.from_native(s_native, series_only=True).rolling_mean(\n ... window_size=2\n ... ).to_native() # doctest:+ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n null,\n 1.5,\n 2.5,\n 3.5\n ]\n ]\n """\n window_size, min_samples = _validate_rolling_arguments(\n window_size=window_size, min_samples=min_samples\n )\n\n if len(self) == 0: # pragma: no cover\n return self\n\n return self._with_compliant(\n self._compliant_series.rolling_mean(\n window_size=window_size, min_samples=min_samples, center=center\n )\n )\n\n def rolling_var(\n self,\n window_size: int,\n *,\n min_samples: int | None = None,\n center: bool = False,\n ddof: int = 1,\n ) -> Self:\n """Apply a rolling variance (moving variance) over the values.\n\n A window of length `window_size` will traverse the values. The resulting values\n will be aggregated to their variance.\n\n The window at a given row will include the row itself and the `window_size - 1`\n elements before it.\n\n Arguments:\n window_size: The length of the window in number of elements. It must be a\n strictly positive integer.\n min_samples: The number of values in the window that should be non-null before\n computing a result. If set to `None` (default), it will be set equal to\n `window_size`. 
If provided, it must be a strictly positive integer, and\n less than or equal to `window_size`.\n center: Set the labels at the center of the window.\n ddof: Delta Degrees of Freedom; the divisor for a length N window is N - ddof.\n\n Returns:\n A new series.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>>\n >>> s_native = pl.Series([1.0, 3.0, 1.0, 4.0])\n >>> nw.from_native(s_native, series_only=True).rolling_var(\n ... window_size=2, min_samples=1\n ... ).to_native() # doctest:+NORMALIZE_WHITESPACE\n shape: (4,)\n Series: '' [f64]\n [\n null\n 2.0\n 2.0\n 4.5\n ]\n """\n window_size, min_samples = _validate_rolling_arguments(\n window_size=window_size, min_samples=min_samples\n )\n\n if len(self) == 0: # pragma: no cover\n return self\n\n return self._with_compliant(\n self._compliant_series.rolling_var(\n window_size=window_size, min_samples=min_samples, center=center, ddof=ddof\n )\n )\n\n def rolling_std(\n self,\n window_size: int,\n *,\n min_samples: int | None = None,\n center: bool = False,\n ddof: int = 1,\n ) -> Self:\n """Apply a rolling standard deviation (moving standard deviation) over the values.\n\n A window of length `window_size` will traverse the values. The resulting values\n will be aggregated to their standard deviation.\n\n The window at a given row will include the row itself and the `window_size - 1`\n elements before it.\n\n Arguments:\n window_size: The length of the window in number of elements. It must be a\n strictly positive integer.\n min_samples: The number of values in the window that should be non-null before\n computing a result. If set to `None` (default), it will be set equal to\n `window_size`. If provided, it must be a strictly positive integer, and\n less than or equal to `window_size`.\n center: Set the labels at the center of the window.\n ddof: Delta Degrees of Freedom; the divisor for a length N window is N - ddof.\n\n Returns:\n A new series.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>>\n >>> s_native = pd.Series([1.0, 3.0, 1.0, 4.0])\n >>> nw.from_native(s_native, series_only=True).rolling_std(\n ... window_size=2, min_samples=1\n ... ).to_native()\n 0 NaN\n 1 1.414214\n 2 1.414214\n 3 2.121320\n dtype: float64\n """\n window_size, min_samples = _validate_rolling_arguments(\n window_size=window_size, min_samples=min_samples\n )\n\n if len(self) == 0: # pragma: no cover\n return self\n\n return self._with_compliant(\n self._compliant_series.rolling_std(\n window_size=window_size, min_samples=min_samples, center=center, ddof=ddof\n )\n )\n\n def __iter__(self) -> Iterator[Any]:\n yield from self._compliant_series.__iter__()\n\n def __contains__(self, other: Any) -> bool:\n return self._compliant_series.__contains__(other)\n\n def rank(self, method: RankMethod = "average", *, descending: bool = False) -> Self:\n """Assign ranks to data, dealing with ties appropriately.\n\n Notes:\n The resulting dtype may differ between backends.\n\n Arguments:\n method: The method used to assign ranks to tied elements.\n The following methods are available (default is 'average')\n\n - *"average"*: The average of the ranks that would have been assigned to\n all the tied values is assigned to each value.\n - *"min"*: The minimum of the ranks that would have been assigned to all\n the tied values is assigned to each value. 
(This is also referred to\n as "competition" ranking.)\n - *"max"*: The maximum of the ranks that would have been assigned to all\n the tied values is assigned to each value.\n - *"dense"*: Like "min", but the rank of the next highest element is\n assigned the rank immediately after those assigned to the tied elements.\n - *"ordinal"*: All values are given a distinct rank, corresponding to the\n order that the values occur in the Series.\n\n descending: Rank in descending order.\n\n Returns:\n A new series with rank data as values.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>>\n >>> s_native = pa.chunked_array([[3, 6, 1, 1, 6]])\n >>> nw.from_native(s_native, series_only=True).rank(\n ... method="dense"\n ... ).to_native() # doctest:+ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n 2,\n 3,\n 1,\n 1,\n 3\n ]\n ]\n """\n supported_rank_methods = {"average", "min", "max", "dense", "ordinal"}\n if method not in supported_rank_methods:\n msg = (\n "Ranking method must be one of {'average', 'min', 'max', 'dense', 'ordinal'}. "\n f"Found '{method}'"\n )\n raise ValueError(msg)\n\n return self._with_compliant(\n self._compliant_series.rank(method=method, descending=descending)\n )\n\n def hist(\n self,\n bins: list[float | int] | None = None,\n *,\n bin_count: int | None = None,\n include_breakpoint: bool = True,\n ) -> DataFrame[Any]:\n """Bin values into buckets and count their occurrences.\n\n Warning:\n This functionality is considered **unstable**. It may be changed at any point\n without it being considered a breaking change.\n\n Arguments:\n bins: A monotonically increasing sequence of values.\n bin_count: If no bins provided, this will be used to determine the distance of the bins.\n include_breakpoint: Include a column that shows the intervals as categories.\n\n Returns:\n A new DataFrame containing the counts of values that occur within each passed bin.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series([1, 3, 8, 8, 2, 1, 3], name="a")\n >>> nw.from_native(s_native, series_only=True).hist(bin_count=4)\n ┌────────────────────┐\n | Narwhals DataFrame |\n |--------------------|\n | breakpoint count|\n |0 2.75 3|\n |1 4.50 2|\n |2 6.25 0|\n |3 8.00 2|\n └────────────────────┘\n """\n if bins is not None and bin_count is not None:\n msg = "can only provide one of `bin_count` or `bins`"\n raise ComputeError(msg)\n if bins is None and bin_count is None:\n bin_count = 10 # polars (v1.20) sets bin=10 if neither are provided.\n\n if bins is not None:\n for i in range(1, len(bins)):\n if bins[i - 1] >= bins[i]:\n msg = "bins must increase monotonically"\n raise ComputeError(msg)\n\n return self._dataframe(\n self._compliant_series.hist(\n bins=bins, bin_count=bin_count, include_breakpoint=include_breakpoint\n ),\n level=self._level,\n )\n\n def log(self, base: float = math.e) -> Self:\n r"""Compute the logarithm to a given base.\n\n Arguments:\n base: Given base, defaults to `e`\n\n Returns:\n A new series.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series([1, 2, 4], name="a")\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.log(base=2)\n ┌───────────────────────┐\n | Narwhals Series |\n |-----------------------|\n |0 0.0 |\n |1 1.0 |\n |2 2.0 |\n |Name: a, dtype: float64|\n └───────────────────────┘\n """\n return self._with_compliant(self._compliant_series.log(base=base))\n\n def exp(self) -> Self:\n r"""Compute the exponent.\n\n Returns:\n A new 
series.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series([-1, 0, 1], name="a")\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.exp()\n ┌───────────────────────┐\n | Narwhals Series |\n |-----------------------|\n |0 0.367879 |\n |1 1.000000 |\n |2 2.718282 |\n |Name: a, dtype: float64|\n └───────────────────────┘\n """\n return self._with_compliant(self._compliant_series.exp())\n\n def sqrt(self) -> Self:\n r"""Compute the square root.\n\n Returns:\n A new series.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series([1, 4, 9], name="a")\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.sqrt()\n ┌───────────────────────┐\n | Narwhals Series |\n |-----------------------|\n |0 1.0 |\n |1 2.0 |\n |2 3.0 |\n |Name: a, dtype: float64|\n └───────────────────────┘\n """\n return self._with_compliant(self._compliant_series.sqrt())\n\n @property\n def str(self) -> SeriesStringNamespace[Self]:\n return SeriesStringNamespace(self)\n\n @property\n def dt(self) -> SeriesDateTimeNamespace[Self]:\n return SeriesDateTimeNamespace(self)\n\n @property\n def cat(self) -> SeriesCatNamespace[Self]:\n return SeriesCatNamespace(self)\n\n @property\n def list(self) -> SeriesListNamespace[Self]:\n return SeriesListNamespace(self)\n\n @property\n def struct(self) -> SeriesStructNamespace[Self]:\n return SeriesStructNamespace(self)\n
.venv\Lib\site-packages\narwhals\series.py
series.py
Python
90,312
0.75
0.081481
0.003968
node-utils
306
2024-11-17T13:28:50.701808
GPL-3.0
false
b0a915739a7bfa844223c93ddf4041ef
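A minimal runnable sketch, not part of the narwhals sources reproduced above, showing how the `Series` methods documented in narwhals/series.py compose across backends. It assumes only pandas and narwhals are installed; the data and variable names are illustrative.

import pandas as pd
import narwhals as nw

# Wrap a native pandas Series in a backend-agnostic narwhals Series.
s = nw.from_native(pd.Series([1, None, 3, 3], name="a"), series_only=True)

# Chain methods documented above: fill nulls, drop duplicates, sort.
# fill_null(0) yields [1.0, 0.0, 3.0, 3.0]; unique(maintain_order=True)
# keeps first occurrences; sort() orders ascending with nulls first.
out = s.fill_null(0).unique(maintain_order=True).sort()
print(out.to_native())  # pandas Series with values 0.0, 1.0, 3.0

The same chain works unchanged on a Polars or PyArrow input, which is the point of routing everything through `_with_compliant` as the source above does.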
from __future__ import annotations\n\nfrom typing import Generic\n\nfrom narwhals.typing import SeriesT\n\n\nclass SeriesCatNamespace(Generic[SeriesT]):\n def __init__(self, series: SeriesT) -> None:\n self._narwhals_series = series\n\n def get_categories(self) -> SeriesT:\n """Get unique categories from column.\n\n Returns:\n A new Series containing the unique categories.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(["apple", "mango", "mango"], dtype="category")\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.cat.get_categories().to_native()\n 0 apple\n 1 mango\n dtype: object\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.cat.get_categories()\n )\n
.venv\Lib\site-packages\narwhals\series_cat.py
series_cat.py
Python
911
0.85
0.1
0
node-utils
617
2025-03-14T23:12:39.706617
BSD-3-Clause
false
e98448fdb69c34124c1d446ad4bf17ea
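A hedged usage sketch for the `cat` namespace defined above; `get_categories` is its only method, and the data here is illustrative. Assumes pandas is installed, since the `cat` accessor requires a categorical dtype on the native side.

import pandas as pd
import narwhals as nw

# A categorical dtype is required before the `cat` namespace is usable.
s = nw.from_native(pd.Series(["b", "a", "a"], dtype="category"), series_only=True)

# The unique categories come back as a regular narwhals Series, so the
# usual Series methods (sort, to_native, ...) apply to the result; the
# sort() makes the output order deterministic across backends.
print(s.cat.get_categories().sort().to_native())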
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Generic\n\nfrom narwhals.typing import SeriesT\n\nif TYPE_CHECKING:\n from narwhals.typing import TimeUnit\n\n\nclass SeriesDateTimeNamespace(Generic[SeriesT]):\n def __init__(self, series: SeriesT) -> None:\n self._narwhals_series = series\n\n def date(self) -> SeriesT:\n """Get the date in a datetime series.\n\n Returns:\n A new Series with the date portion of the datetime values.\n\n Raises:\n NotImplementedError: If pandas default backend is being used.\n\n Examples:\n >>> from datetime import datetime\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(\n ... [datetime(2012, 1, 7, 10, 20), datetime(2023, 3, 10, 11, 32)]\n ... ).convert_dtypes(dtype_backend="pyarrow")\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.date().to_native()\n 0 2012-01-07\n 1 2023-03-10\n dtype: date32[day][pyarrow]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.date()\n )\n\n def year(self) -> SeriesT:\n """Get the year in a datetime series.\n\n Returns:\n A new Series containing the year component of each datetime value.\n\n Examples:\n >>> from datetime import datetime\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series([datetime(2012, 1, 7), datetime(2023, 3, 10)])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.year().to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (2,)\n Series: '' [i32]\n [\n 2012\n 2023\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.year()\n )\n\n def month(self) -> SeriesT:\n """Gets the month in a datetime series.\n\n Returns:\n A new Series containing the month component of each datetime value.\n\n Examples:\n >>> from datetime import datetime\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series([datetime(2012, 1, 7), datetime(2023, 3, 10)])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.month().to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (2,)\n Series: '' [i8]\n [\n 1\n 3\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.month()\n )\n\n def day(self) -> SeriesT:\n """Extracts the day in a datetime series.\n\n Returns:\n A new Series containing the day component of each datetime value.\n\n Examples:\n >>> from datetime import datetime\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pa.chunked_array(\n ... [[datetime(2022, 1, 1), datetime(2022, 1, 5)]]\n ... )\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.day().to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n 1,\n 5\n ]\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.day()\n )\n\n def hour(self) -> SeriesT:\n """Extracts the hour in a datetime series.\n\n Returns:\n A new Series containing the hour component of each datetime value.\n\n Examples:\n >>> from datetime import datetime\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pa.chunked_array(\n ... [[datetime(2022, 1, 1, 5, 3), datetime(2022, 1, 5, 9, 12)]]\n ... 
)\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.hour().to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n 5,\n 9\n ]\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.hour()\n )\n\n def minute(self) -> SeriesT:\n """Extracts the minute in a datetime series.\n\n Returns:\n A new Series containing the minute component of each datetime value.\n\n Examples:\n >>> from datetime import datetime\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(\n ... [datetime(2022, 1, 1, 5, 3), datetime(2022, 1, 5, 9, 12)]\n ... )\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.minute().to_native()\n 0 3\n 1 12\n dtype: int32\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.minute()\n )\n\n def second(self) -> SeriesT:\n """Extracts the seconds in a datetime series.\n\n Returns:\n A new Series containing the second component of each datetime value.\n\n Examples:\n >>> from datetime import datetime\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(\n ... [datetime(2022, 1, 1, 5, 3, 10), datetime(2022, 1, 5, 9, 12, 4)]\n ... )\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.second().to_native()\n 0 10\n 1 4\n dtype: int32\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.second()\n )\n\n def millisecond(self) -> SeriesT:\n """Extracts the milliseconds in a datetime series.\n\n Returns:\n A new Series containing the millisecond component of each datetime value.\n\n Examples:\n >>> from datetime import datetime\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(\n ... [\n ... datetime(2022, 1, 1, 5, 3, 7, 400000),\n ... datetime(2022, 1, 1, 5, 3, 7, 0),\n ... ]\n ... )\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.millisecond().alias("datetime").to_native()\n 0 400\n 1 0\n Name: datetime, dtype: int32\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.millisecond()\n )\n\n def microsecond(self) -> SeriesT:\n """Extracts the microseconds in a datetime series.\n\n Returns:\n A new Series containing the microsecond component of each datetime value.\n\n Examples:\n >>> from datetime import datetime\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(\n ... [\n ... datetime(2022, 1, 1, 5, 3, 7, 400000),\n ... datetime(2022, 1, 1, 5, 3, 7, 0),\n ... ]\n ... )\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.microsecond().alias("datetime").to_native()\n 0 400000\n 1 0\n Name: datetime, dtype: int32\n\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.microsecond()\n )\n\n def nanosecond(self) -> SeriesT:\n """Extract the nanoseconds in a date series.\n\n Returns:\n A new Series containing the nanosecond component of each datetime value.\n\n Examples:\n >>> from datetime import datetime\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(\n ... [\n ... datetime(2022, 1, 1, 5, 3, 7, 400000),\n ... datetime(2022, 1, 1, 5, 3, 7, 0),\n ... ]\n ... 
)\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.nanosecond().alias("datetime").to_native()\n 0 400000000\n 1 0\n Name: datetime, dtype: int32\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.nanosecond()\n )\n\n def ordinal_day(self) -> SeriesT:\n """Get ordinal day.\n\n Returns:\n A new Series containing the ordinal day (day of year) for each datetime value.\n\n Examples:\n >>> from datetime import datetime\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pa.chunked_array(\n ... [[datetime(2020, 1, 1), datetime(2020, 8, 3)]]\n ... )\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.ordinal_day().to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n 1,\n 216\n ]\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.ordinal_day()\n )\n\n def weekday(self) -> SeriesT:\n """Extract the week day in a datetime series.\n\n Returns:\n A new Series containing the week day for each datetime value.\n Returns the ISO weekday number, where Monday = 1 and Sunday = 7.\n\n Examples:\n >>> from datetime import datetime\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pa.chunked_array(\n ... [[datetime(2020, 1, 1), datetime(2020, 8, 3)]]\n ... )\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.weekday().to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n 3,\n 1\n ]\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.weekday()\n )\n\n def total_minutes(self) -> SeriesT:\n """Get total minutes.\n\n Notes:\n The function outputs the total minutes in the int dtype by default;\n however, pandas may change the dtype to float when there are missing\n values. Consider using `fill_null()` in this case.\n\n Returns:\n A new Series containing the total number of minutes for each timedelta value.\n\n Examples:\n >>> from datetime import timedelta\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series(\n ... [timedelta(minutes=10), timedelta(minutes=20, seconds=40)]\n ... )\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.total_minutes().to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (2,)\n Series: '' [i64]\n [\n 10\n 20\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.total_minutes()\n )\n\n def total_seconds(self) -> SeriesT:\n """Get total seconds.\n\n Notes:\n The function outputs the total seconds in the int dtype by default;\n however, pandas may change the dtype to float when there are missing\n values. Consider using `fill_null()` in this case.\n\n Returns:\n A new Series containing the total number of seconds for each timedelta value.\n\n Examples:\n >>> from datetime import timedelta\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series(\n ... [timedelta(minutes=10), timedelta(minutes=20, seconds=40)]\n ... 
)\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.total_seconds().to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (2,)\n Series: '' [i64]\n [\n 600\n 1240\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.total_seconds()\n )\n\n def total_milliseconds(self) -> SeriesT:\n """Get total milliseconds.\n\n Notes:\n The function outputs the total milliseconds in the int dtype by default;\n however, pandas may change the dtype to float when there are missing\n values. Consider using `fill_null()` in this case.\n\n Returns:\n A new Series containing the total number of milliseconds for each timedelta value.\n\n Examples:\n >>> from datetime import timedelta\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series(\n ... [\n ... timedelta(milliseconds=10),\n ... timedelta(milliseconds=20, microseconds=40),\n ... ]\n ... )\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.total_milliseconds().to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (2,)\n Series: '' [i64]\n [\n 10\n 20\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.total_milliseconds()\n )\n\n def total_microseconds(self) -> SeriesT:\n """Get total microseconds.\n\n Returns:\n A new Series containing the total number of microseconds for each timedelta value.\n\n Notes:\n The function outputs the total microseconds in the int dtype by default;\n however, pandas may change the dtype to float when there are missing\n values. Consider using `fill_null()` in this case.\n\n Examples:\n >>> from datetime import timedelta\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series(\n ... [\n ... timedelta(microseconds=10),\n ... timedelta(milliseconds=1, microseconds=200),\n ... ]\n ... )\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.total_microseconds().to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (2,)\n Series: '' [i64]\n [\n 10\n 1200\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.total_microseconds()\n )\n\n def total_nanoseconds(self) -> SeriesT:\n """Get total nanoseconds.\n\n Notes:\n The function outputs the total nanoseconds in the int dtype by default;\n however, pandas may change the dtype to float when there are missing\n values. Consider using `fill_null()` in this case.\n\n Returns:\n A new Series containing the total number of nanoseconds for each timedelta value.\n\n Examples:\n >>> from datetime import datetime\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series(\n ... ["2024-01-01 00:00:00.000000001", "2024-01-01 00:00:00.000000002"]\n ... 
).str.to_datetime(time_unit="ns")\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.diff().dt.total_nanoseconds().to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (2,)\n Series: '' [i64]\n [\n null\n 1\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.total_nanoseconds()\n )\n\n def to_string(self, format: str) -> SeriesT:\n """Convert a Date/Time/Datetime series into a String series with the given format.\n\n Arguments:\n format: Format string for converting the datetime to string.\n\n Returns:\n A new Series with the datetime values formatted as strings according to the specified format.\n\n Notes:\n Unfortunately, different libraries interpret format directives a bit\n differently.\n\n - Chrono, the library used by Polars, uses `"%.f"` for fractional seconds,\n whereas pandas and Python stdlib use `".%f"`.\n - PyArrow interprets `"%S"` as "seconds, including fractional seconds"\n whereas most other tools interpret it as "just seconds, as 2 digits".\n ---\n Therefore, we make the following adjustments.\n\n - for pandas-like libraries, we replace `"%S.%f"` with `"%S%.f"`.\n - for PyArrow, we replace `"%S.%f"` with `"%S"`.\n ---\n Workarounds like these don't make us happy, and we try to avoid them as\n much as possible, but here we feel like it's the best compromise.\n\n If you just want to format a date/datetime Series as a local datetime\n string, and have it work as consistently as possible across libraries,\n we suggest using:\n\n - `"%Y-%m-%dT%H:%M:%S%.f"` for datetimes\n - `"%Y-%m-%d"` for dates\n ---\n Though note that, even then, different tools may return a different number\n of trailing zeros. Nonetheless, this is probably consistent enough for\n most applications.\n\n If you have an application where this is not enough, please open an issue\n and let us know.\n\n Examples:\n >>> from datetime import datetime\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pa.chunked_array(\n ... [[datetime(2020, 3, 1), datetime(2020, 4, 1)]]\n ... )\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.to_string("%Y/%m/%d").to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n "2020/03/01",\n "2020/04/01"\n ]\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.to_string(format)\n )\n\n def replace_time_zone(self, time_zone: str | None) -> SeriesT:\n """Replace time zone.\n\n Arguments:\n time_zone: Target time zone.\n\n Returns:\n A new Series with the specified time zone.\n\n Examples:\n >>> from datetime import datetime, timezone\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series(\n ... [\n ... datetime(2024, 1, 1, tzinfo=timezone.utc),\n ... datetime(2024, 1, 2, tzinfo=timezone.utc),\n ... ]\n ... )\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.replace_time_zone(\n ... "Asia/Kathmandu"\n ... 
).to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (2,)\n Series: '' [datetime[μs, Asia/Kathmandu]]\n [\n 2024-01-01 00:00:00 +0545\n 2024-01-02 00:00:00 +0545\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.replace_time_zone(time_zone)\n )\n\n def convert_time_zone(self, time_zone: str) -> SeriesT:\n """Convert time zone.\n\n If converting from a time-zone-naive column, then conversion happens\n as if converting from UTC.\n\n Arguments:\n time_zone: Target time zone.\n\n Returns:\n A new Series with the specified time zone.\n\n Examples:\n >>> from datetime import datetime, timezone\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(\n ... [\n ... datetime(2024, 1, 1, tzinfo=timezone.utc),\n ... datetime(2024, 1, 2, tzinfo=timezone.utc),\n ... ]\n ... )\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.convert_time_zone("Asia/Kathmandu").to_native()\n 0 2024-01-01 05:45:00+05:45\n 1 2024-01-02 05:45:00+05:45\n dtype: datetime64[ns, Asia/Kathmandu]\n """\n if time_zone is None:\n msg = "Target `time_zone` cannot be `None` in `convert_time_zone`. Please use `replace_time_zone(None)` if you want to remove the time zone."\n raise TypeError(msg)\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.convert_time_zone(time_zone)\n )\n\n def timestamp(self, time_unit: TimeUnit) -> SeriesT:\n """Return a timestamp in the given time unit.\n\n Arguments:\n time_unit: One of\n - 'ns': nanosecond.\n - 'us': microsecond.\n - 'ms': millisecond.\n\n Returns:\n A new Series with timestamps in the specified time unit.\n\n Examples:\n >>> from datetime import date\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(\n ... [date(2001, 1, 1), None, date(2001, 1, 3)], dtype="datetime64[ns]"\n ... )\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.timestamp("ms").to_native()\n 0 9.783072e+11\n 1 NaN\n 2 9.784800e+11\n dtype: float64\n """\n if time_unit not in {"ns", "us", "ms"}:\n msg = (\n "invalid `time_unit`"\n f"\n\nExpected one of {{'ns', 'us', 'ms'}}, got {time_unit!r}."\n )\n raise ValueError(msg)\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.timestamp(time_unit)\n )\n\n def truncate(self, every: str) -> SeriesT:\n """Divide the date/datetime range into buckets.\n\n Arguments:\n every: Length of bucket. Must be of form `<multiple><unit>`,\n where `multiple` is a positive integer and `unit` is one of\n\n - 'ns': nanosecond.\n - 'us': microsecond.\n - 'ms': millisecond.\n - 's': second.\n - 'm': minute.\n - 'h': hour.\n - 'd': day.\n - 'mo': month.\n - 'q': quarter.\n - 'y': year.\n\n Returns:\n Series of data type `Date` or `Datetime`.\n\n Examples:\n >>> from datetime import datetime\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series([datetime(2021, 3, 1, 12, 34)])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.dt.truncate("1h").to_native()\n 0 2021-03-01 12:00:00\n dtype: datetime64[ns]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.dt.truncate(every)\n )\n
.venv\Lib\site-packages\narwhals\series_dt.py
series_dt.py
Python
24,109
0.95
0.071742
0
python-kit
687
2025-05-19T04:03:27.033439
Apache-2.0
false
593f403acaf9d6f45da6cd24da4d54b0
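The `dt` namespace documented above is backend-agnostic. A minimal sketch of how the methods compose, assuming a pandas backend (the input values are illustrative, not part of the file):

from datetime import datetime

import pandas as pd
import narwhals as nw

s = nw.from_native(pd.Series([datetime(2021, 3, 1, 12, 34)]), series_only=True)

# Bucket to the hour, then view the same instant in another time zone;
# per the docstring, a time-zone-naive input is treated as UTC.
print(s.dt.truncate("1h").to_native())                       # 2021-03-01 12:00:00
print(s.dt.convert_time_zone("Asia/Kathmandu").to_native())  # 2021-03-01 18:19:00+05:45
print(s.dt.timestamp("ms").to_native())                      # milliseconds since the Unix epoch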
from __future__ import annotations\n\nfrom typing import Generic\n\nfrom narwhals.typing import SeriesT\n\n\nclass SeriesListNamespace(Generic[SeriesT]):\n def __init__(self, series: SeriesT) -> None:\n self._narwhals_series = series\n\n def len(self) -> SeriesT:\n """Return the number of elements in each list.\n\n Null values count towards the total.\n\n Returns:\n A new series.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pa.chunked_array([[[1, 2], [3, 4, None], None, []]])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.list.len().to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n 2,\n 3,\n null,\n 0\n ]\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.list.len()\n )\n
.venv\Lib\site-packages\narwhals\series_list.py
series_list.py
Python
1,041
0.95
0.078947
0
node-utils
107
2025-03-20T13:33:35.504608
MIT
false
964502ae1e82fffbb622f6504b7e92d6
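A short usage sketch of `list.len` on a Polars-backed Series (the doctest above uses PyArrow; the backend swap here is only for contrast):

import polars as pl
import narwhals as nw

s = nw.from_native(pl.Series([[1, 2], [3, 4, None], None, []]), series_only=True)
# Nulls inside a list count towards its length; a null list stays null.
print(s.list.len().to_list())  # [2, 3, None, 0]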
from __future__ import annotations\n\nfrom typing import Generic\n\nfrom narwhals.typing import SeriesT\n\n\nclass SeriesStringNamespace(Generic[SeriesT]):\n def __init__(self, series: SeriesT) -> None:\n self._narwhals_series = series\n\n def len_chars(self) -> SeriesT:\n r"""Return the length of each string as the number of characters.\n\n Returns:\n A new Series containing the length of each string in characters.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series(["foo", "345", None])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.str.len_chars().to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (3,)\n Series: '' [u32]\n [\n 3\n 3\n null\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.str.len_chars()\n )\n\n def replace(\n self, pattern: str, value: str, *, literal: bool = False, n: int = 1\n ) -> SeriesT:\n r"""Replace first matching regex/literal substring with a new string value.\n\n Arguments:\n pattern: A valid regular expression pattern.\n value: String that will replace the matched substring.\n literal: Treat `pattern` as a literal string.\n n: Number of matches to replace.\n\n Returns:\n A new Series with the regex/literal pattern replaced with the specified value.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(["123abc", "abc abc123"])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.str.replace("abc", "").to_native()\n 0 123\n 1 abc123\n dtype: object\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.str.replace(\n pattern, value, literal=literal, n=n\n )\n )\n\n def replace_all(self, pattern: str, value: str, *, literal: bool = False) -> SeriesT:\n r"""Replace all matching regex/literal substring with a new string value.\n\n Arguments:\n pattern: A valid regular expression pattern.\n value: String that will replace the matched substring.\n literal: Treat `pattern` as a literal string.\n\n Returns:\n A new Series with all occurrences of pattern replaced with the specified value.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(["123abc", "abc abc123"])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.str.replace_all("abc", "").to_native()\n 0 123\n 1 123\n dtype: object\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.str.replace_all(\n pattern, value, literal=literal\n )\n )\n\n def strip_chars(self, characters: str | None = None) -> SeriesT:\n r"""Remove leading and trailing characters.\n\n Arguments:\n characters: The set of characters to be removed. All combinations of this set of characters will be stripped from the start and end of the string. 
If set to None (default), all leading and trailing whitespace is removed instead.\n\n Returns:\n A new Series with leading and trailing characters removed.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series(["apple", "\nmango"])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.str.strip_chars().to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (2,)\n Series: '' [str]\n [\n "apple"\n "mango"\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.str.strip_chars(characters)\n )\n\n def starts_with(self, prefix: str) -> SeriesT:\n r"""Check if string values start with a substring.\n\n Arguments:\n prefix: prefix substring\n\n Returns:\n A new Series with boolean values indicating if each string starts with the prefix.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(["apple", "mango", None])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.str.starts_with("app").to_native()\n 0 True\n 1 False\n 2 None\n dtype: object\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.str.starts_with(prefix)\n )\n\n def ends_with(self, suffix: str) -> SeriesT:\n r"""Check if string values end with a substring.\n\n Arguments:\n suffix: suffix substring\n\n Returns:\n A new Series with boolean values indicating if each string ends with the suffix.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(["apple", "mango", None])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.str.ends_with("ngo").to_native()\n 0 False\n 1 True\n 2 None\n dtype: object\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.str.ends_with(suffix)\n )\n\n def contains(self, pattern: str, *, literal: bool = False) -> SeriesT:\n r"""Check if string contains a substring that matches a pattern.\n\n Arguments:\n pattern: A Character sequence or valid regular expression pattern.\n literal: If True, treats the pattern as a literal string.\n If False, assumes the pattern is a regular expression.\n\n Returns:\n A new Series with boolean values indicating if each string contains the pattern.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pa.chunked_array([["cat", "dog", "rabbit and parrot"]])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.str.contains("cat|parrot").to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n true,\n false,\n true\n ]\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.str.contains(pattern, literal=literal)\n )\n\n def slice(self, offset: int, length: int | None = None) -> SeriesT:\n r"""Create subslices of the string values of a Series.\n\n Arguments:\n offset: Start index. Negative indexing is supported.\n length: Length of the slice. 
If set to `None` (default), the slice is taken to the\n end of the string.\n\n Returns:\n A new Series containing subslices of each string.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(["pear", None, "papaya"])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.str.slice(4, 3).to_native() # doctest: +NORMALIZE_WHITESPACE\n 0\n 1 None\n 2 ya\n dtype: object\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.str.slice(\n offset=offset, length=length\n )\n )\n\n def split(self, by: str) -> SeriesT:\n r"""Split the string values of a Series by a substring.\n\n Arguments:\n by: Substring to split by.\n\n Returns:\n A new Series containing lists of strings.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series(["foo bar", "foo_bar"])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.str.split("_").to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (2,)\n Series: '' [list[str]]\n [\n ["foo bar"]\n ["foo", "bar"]\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.str.split(by=by)\n )\n\n def head(self, n: int = 5) -> SeriesT:\n r"""Take the first n elements of each string.\n\n Arguments:\n n: Number of elements to take. Negative indexing is supported (see note (1.))\n\n Returns:\n A new Series containing the first n characters of each string.\n\n Notes:\n 1. When the `n` input is negative, `head` returns characters up to the n-th from the end of the string.\n For example, if `n = -3`, then all characters except the last three are returned.\n 2. If the length of the string has fewer than `n` characters, the full string is returned.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pa.chunked_array([["taata", "taatatata", "zukkyun"]])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.str.head().to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n "taata",\n "taata",\n "zukky"\n ]\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.str.slice(offset=0, length=n)\n )\n\n def tail(self, n: int = 5) -> SeriesT:\n r"""Take the last n elements of each string.\n\n Arguments:\n n: Number of elements to take. Negative indexing is supported (see note (1.))\n\n Returns:\n A new Series containing the last n characters of each string.\n\n Notes:\n 1. When the `n` input is negative, `tail` returns characters starting from the n-th from the beginning of\n the string. For example, if `n = -3`, then all characters except the first three are returned.\n 2. 
If the length of the string has fewer than `n` characters, the full string is returned.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pa.chunked_array([["taata", "taatatata", "zukkyun"]])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.str.tail().to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n "taata",\n "atata",\n "kkyun"\n ]\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.str.slice(offset=-n, length=None)\n )\n\n def to_uppercase(self) -> SeriesT:\n r"""Transform string to uppercase variant.\n\n Returns:\n A new Series with values converted to uppercase.\n\n Notes:\n The PyArrow backend will convert 'ß' to 'ẞ' instead of 'SS'.\n For more info see: https://github.com/apache/arrow/issues/34599\n There may be other unicode-edge-case-related variations across implementations.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(["apple", None])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.str.to_uppercase().to_native()\n 0 APPLE\n 1 None\n dtype: object\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.str.to_uppercase()\n )\n\n def to_lowercase(self) -> SeriesT:\n r"""Transform string to lowercase variant.\n\n Returns:\n A new Series with values converted to lowercase.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(["APPLE", None])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.str.to_lowercase().to_native()\n 0 apple\n 1 None\n dtype: object\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.str.to_lowercase()\n )\n\n def to_datetime(self, format: str | None = None) -> SeriesT:\n """Parse Series with strings to a Series with Datetime dtype.\n\n Notes:\n - pandas defaults to nanosecond time unit, Polars to microsecond.\n Prior to pandas 2.0, nanoseconds were the only time unit supported\n in pandas, with no ability to set any other one. The ability to\n set the time unit in pandas, if the version permits, will arrive.\n - timezone-aware strings are all converted to and parsed as UTC.\n\n Warning:\n As different backends auto-infer format in different ways, if `format=None`\n there is no guarantee that the result will be equal.\n\n Arguments:\n format: Format to use for conversion. If set to None (default), the format is\n inferred from the data.\n\n Returns:\n A new Series with datetime dtype.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series(["2020-01-01", "2020-01-02"])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.str.to_datetime(\n ... format="%Y-%m-%d"\n ... ).to_native() # doctest: +NORMALIZE_WHITESPACE\n shape: (2,)\n Series: '' [datetime[μs]]\n [\n 2020-01-01 00:00:00\n 2020-01-02 00:00:00\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.str.to_datetime(format=format)\n )\n\n def to_date(self, format: str | None = None) -> SeriesT:\n """Convert to date dtype.\n\n Warning:\n As different backends auto-infer format in different ways, if `format=None`\n there is no guarantee that the result will be equal.\n\n Arguments:\n format: Format to use for conversion. 
If set to None (default), the format is\n inferred from the data.\n\n Returns:\n A new Series with date dtype.\n\n Examples:\n >>> import pyarrow as pa\n >>> import narwhals as nw\n >>> s_native = pa.chunked_array([["2020-01-01", "2020-01-02"]])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.str.to_date(format="%Y-%m-%d").to_native() # doctest: +ELLIPSIS\n <pyarrow.lib.ChunkedArray object at ...>\n [\n [\n 2020-01-01,\n 2020-01-02\n ]\n ]\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.str.to_date(format=format)\n )\n\n def zfill(self, width: int) -> SeriesT:\n r"""Pad strings with zeros on the left.\n\n Arguments:\n width: The target width of the string. If the string is shorter than this width, it will be padded with zeros on the left.\n\n Returns:\n A new Series with strings padded with zeros on the left.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> s_native = pd.Series(["+1", "-23", "456", "123456"])\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.str.zfill(5).to_native()\n 0 +0001\n 1 -0023\n 2 00456\n 3 123456\n dtype: object\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.str.zfill(width)\n )\n
.venv\Lib\site-packages\narwhals\series_str.py
series_str.py
Python
16,565
0.95
0.067834
0
node-utils
82
2025-06-11T05:36:32.402254
BSD-3-Clause
false
69b182420ad0c4c9baf2ccd9f2ed680e
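Because `head` and `tail` above are implemented as calls to `slice`, the equivalences below should hold; a sketch on a pandas-backed Series (inputs are illustrative):

import pandas as pd
import narwhals as nw

s = nw.from_native(pd.Series(["narwhals", "polars"]), series_only=True)

# head(n) is slice(0, n) and tail(n) is slice(-n), per the implementation above.
assert s.str.head(3).to_list() == s.str.slice(0, 3).to_list() == ["nar", "pol"]
assert s.str.tail(3).to_list() == s.str.slice(-3).to_list() == ["als", "ars"]
print(s.str.replace_all("a", "_").to_list())  # ['n_rwh_ls', 'pol_rs']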
from __future__ import annotations\n\nfrom typing import Generic\n\nfrom narwhals.typing import SeriesT\n\n\nclass SeriesStructNamespace(Generic[SeriesT]):\n def __init__(self, series: SeriesT) -> None:\n self._narwhals_series = series\n\n def field(self, name: str) -> SeriesT:\n r"""Retrieve a Struct field as a new expression.\n\n Arguments:\n name: Name of the struct field to retrieve.\n\n Returns:\n A new Series.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> s_native = pl.Series(\n ... [{"id": "0", "name": "john"}, {"id": "1", "name": "jane"}]\n ... )\n >>> s = nw.from_native(s_native, series_only=True)\n >>> s.struct.field("name").to_list()\n ['john', 'jane']\n """\n return self._narwhals_series._with_compliant(\n self._narwhals_series._compliant_series.struct.field(name)\n )\n
.venv\Lib\site-packages\narwhals\series_struct.py
series_struct.py
Python
974
0.85
0.090909
0
python-kit
72
2023-09-23T18:39:30.857306
MIT
false
76408b1545a807ab40d09d0413e30f04
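The same accessor should also work on a PyArrow-backed Series; a hedged sketch (that the PyArrow backend implements `struct.field` is an assumption here, chosen for contrast with the Polars doctest):

import pyarrow as pa
import narwhals as nw

s_native = pa.chunked_array(
    [[{"id": "0", "name": "john"}, {"id": "1", "name": "jane"}]]
)
s = nw.from_native(s_native, series_only=True)
# Pull a single field out of each struct value.
print(s.struct.field("name").to_list())  # ['john', 'jane']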
# ruff: noqa\nZEN = """\\n⣿⣿⣿⣿⣿⠘⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿ THE ZEN OF NARWHALS\n⣿⣿⣿⣿⣿⠠⢹⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿ Keep it simple\n⣿⣿⣿⣿⣿⡀⡄⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿ Move slowly and keep things working\n⣿⣿⣿⣿⣿⡇⡼⡘⠛⠿⠿⠿⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿ A good API is an honest one\n⣿⣿⣿⡿⣫⡄⠾⣣⠹⣿⣿⣿⣶⣮⣙⠻⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿ Yes, that needs documenting\n⣿⣿⢋⣴⣿⣷⣬⣭⣾⣿⣿⣿⣿⣿⣿⣿⣦⡙⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿ People learn better from examples\n⣿⢃⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⡌⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿ than from explanations⠀\n⡏⠀⢰⠄⢻⣿⣿⣿⣿⡿⠋⢉⠻⣿⣿⣿⣿⣿⣿⡜⣿⣿⡿⢁⢻⣿⣿⣿⣿⣿ If in doubt, better to say 'no'\n⡇⣌⣀⣠⣾⣿⣿⣿⣿⣇⠶⠉⢁⣿⣿⣿⣿⣿⣿⣧⡹⣿⡇⣿⣧⠻⠿⠿⠿⠿ than to risk causing a commotion⠀\n⡧⢹⣿⣿⣿⣜⣟⣸⣿⣿⣷⣶⣿⡿⣿⣿⣝⢿⣿⣿⣷⣬⣥⣿⣿⣿⣿⣿⡟⣰ Yes, we need a test for that\n⢡⣆⢻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣧⡙⣿⣿⡇⣿⣿⣿⣿⠟⣋⣭⣛⠻⣋⣴⣿ If you want users \n⣶⣤⣤⣙⠻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣦⣍⣡⣿⡿⢋⣴⣿⣿⣿⣿⣿⣿⣿⣿ you need good docs⠀\n⣿⣿⣿⣿⣿⣶⣬⣙⣛⠻⠿⠿⠿⠿⠿⠟⣛⣩⣥⣶⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿ Our code is not irreplaceable"""\n\nprint(ZEN)\n
.venv\Lib\site-packages\narwhals\this.py
this.py
Python
1,584
0.8
0.058824
0.0625
awesome-app
839
2024-04-02T06:50:52.636986
BSD-3-Clause
false
b84cfc7ce6c2e30da5d2ea1bfee48a96
from __future__ import annotations\n\nimport datetime as dt\nfrom decimal import Decimal\nfrom functools import wraps\nfrom typing import TYPE_CHECKING, Any, Callable, Literal, TypeVar, overload\n\nfrom narwhals._constants import EPOCH, MS_PER_SECOND\nfrom narwhals._namespace import (\n is_native_arrow,\n is_native_pandas_like,\n is_native_polars,\n is_native_spark_like,\n)\nfrom narwhals._utils import Version\nfrom narwhals.dependencies import (\n get_dask,\n get_dask_expr,\n get_numpy,\n get_pandas,\n is_cupy_scalar,\n is_dask_dataframe,\n is_duckdb_relation,\n is_ibis_table,\n is_numpy_scalar,\n is_pandas_like_dataframe,\n is_polars_lazyframe,\n is_polars_series,\n is_pyarrow_scalar,\n is_pyarrow_table,\n)\n\nif TYPE_CHECKING:\n from narwhals.dataframe import DataFrame, LazyFrame\n from narwhals.series import Series\n from narwhals.typing import (\n DataFrameT,\n IntoDataFrameT,\n IntoFrame,\n IntoFrameT,\n IntoLazyFrameT,\n IntoSeries,\n IntoSeriesT,\n LazyFrameT,\n SeriesT,\n )\n\nT = TypeVar("T")\n\nNON_TEMPORAL_SCALAR_TYPES = (bool, bytes, str, int, float, complex, Decimal)\nTEMPORAL_SCALAR_TYPES = (dt.date, dt.timedelta, dt.time)\n\n\n@overload\ndef to_native(\n narwhals_object: DataFrame[IntoDataFrameT], *, pass_through: Literal[False] = ...\n) -> IntoDataFrameT: ...\n@overload\ndef to_native(\n narwhals_object: LazyFrame[IntoFrameT], *, pass_through: Literal[False] = ...\n) -> IntoFrameT: ...\n@overload\ndef to_native(\n narwhals_object: Series[IntoSeriesT], *, pass_through: Literal[False] = ...\n) -> IntoSeriesT: ...\n@overload\ndef to_native(narwhals_object: Any, *, pass_through: bool) -> Any: ...\n\n\ndef to_native(\n narwhals_object: DataFrame[IntoDataFrameT]\n | LazyFrame[IntoFrameT]\n | Series[IntoSeriesT],\n *,\n strict: bool | None = None,\n pass_through: bool | None = None,\n) -> IntoDataFrameT | IntoFrameT | IntoSeriesT | Any:\n """Convert Narwhals object to native one.\n\n Arguments:\n narwhals_object: Narwhals object.\n strict: Determine what happens if `narwhals_object` isn't a Narwhals class\n\n - `True` (default): raise an error\n - `False`: pass object through as-is\n\n *Deprecated* (v1.13.0)\n\n Please use `pass_through` instead. 
Note that `strict` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n pass_through: Determine what happens if `narwhals_object` isn't a Narwhals class\n\n - `False` (default): raise an error\n - `True`: pass object through as-is\n\n Returns:\n Object of class that user started with.\n """\n from narwhals._utils import validate_strict_and_pass_though\n from narwhals.dataframe import BaseFrame\n from narwhals.series import Series\n\n pass_through = validate_strict_and_pass_though(\n strict, pass_through, pass_through_default=False, emit_deprecation_warning=True\n )\n\n if isinstance(narwhals_object, BaseFrame):\n return narwhals_object._compliant_frame._native_frame\n if isinstance(narwhals_object, Series):\n return narwhals_object._compliant_series.native\n\n if not pass_through:\n msg = f"Expected Narwhals object, got {type(narwhals_object)}."\n raise TypeError(msg)\n return narwhals_object\n\n\n@overload\ndef from_native(native_object: SeriesT, **kwds: Any) -> SeriesT: ...\n\n\n@overload\ndef from_native(native_object: DataFrameT, **kwds: Any) -> DataFrameT: ...\n\n\n@overload\ndef from_native(native_object: LazyFrameT, **kwds: Any) -> LazyFrameT: ...\n\n\n@overload\ndef from_native(\n native_object: IntoDataFrameT | IntoSeriesT,\n *,\n pass_through: Literal[True],\n eager_only: Literal[True],\n series_only: Literal[False] = ...,\n allow_series: Literal[True],\n) -> DataFrame[IntoDataFrameT] | Series[IntoSeriesT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoDataFrameT,\n *,\n pass_through: Literal[True],\n eager_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> DataFrame[IntoDataFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: T,\n *,\n pass_through: Literal[True],\n eager_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> T: ...\n\n\n@overload\ndef from_native(\n native_object: IntoDataFrameT,\n *,\n pass_through: Literal[True],\n eager_only: Literal[True],\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> DataFrame[IntoDataFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: T,\n *,\n pass_through: Literal[True],\n eager_only: Literal[True],\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> T: ...\n\n\n@overload\ndef from_native(\n native_object: IntoFrameT | IntoLazyFrameT | IntoSeriesT,\n *,\n pass_through: Literal[True],\n eager_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: Literal[True],\n) -> DataFrame[IntoFrameT] | LazyFrame[IntoLazyFrameT] | Series[IntoSeriesT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoSeriesT,\n *,\n pass_through: Literal[True],\n eager_only: Literal[False] = ...,\n series_only: Literal[True],\n allow_series: None = ...,\n) -> Series[IntoSeriesT]: ...\n\n\n# NOTE: Seems like `mypy` is giving a false positive\n# Following this advice will introduce overlapping overloads?\n# > note: Flipping the order of overloads will fix this error\n@overload\ndef from_native( # type: ignore[overload-overlap]\n native_object: IntoLazyFrameT,\n *,\n pass_through: Literal[False] = ...,\n eager_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> LazyFrame[IntoLazyFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoDataFrameT,\n *,\n pass_through: Literal[False] = ...,\n eager_only: 
Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> DataFrame[IntoDataFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoDataFrameT,\n *,\n pass_through: Literal[False] = ...,\n eager_only: Literal[True],\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> DataFrame[IntoDataFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoFrame | IntoSeries,\n *,\n pass_through: Literal[False] = ...,\n eager_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: Literal[True],\n) -> DataFrame[Any] | LazyFrame[Any] | Series[Any]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoSeriesT,\n *,\n pass_through: Literal[False] = ...,\n eager_only: Literal[False] = ...,\n series_only: Literal[True],\n allow_series: None = ...,\n) -> Series[IntoSeriesT]: ...\n\n\n# All params passed in as variables\n@overload\ndef from_native(\n native_object: Any,\n *,\n pass_through: bool,\n eager_only: bool,\n series_only: bool,\n allow_series: bool | None,\n) -> Any: ...\n\n\ndef from_native( # noqa: D417\n native_object: IntoLazyFrameT | IntoFrameT | IntoSeriesT | IntoFrame | IntoSeries | T,\n *,\n strict: bool | None = None,\n pass_through: bool | None = None,\n eager_only: bool = False,\n series_only: bool = False,\n allow_series: bool | None = None,\n **kwds: Any,\n) -> LazyFrame[IntoLazyFrameT] | DataFrame[IntoFrameT] | Series[IntoSeriesT] | T:\n """Convert `native_object` to Narwhals Dataframe, Lazyframe, or Series.\n\n Arguments:\n native_object: Raw object from user.\n Depending on the other arguments, input object can be\n\n - a Dataframe / Lazyframe / Series supported by Narwhals (pandas, Polars, PyArrow, ...)\n - an object which implements `__narwhals_dataframe__`, `__narwhals_lazyframe__`,\n or `__narwhals_series__`\n strict: Determine what happens if the object can't be converted to Narwhals\n\n - `True` or `None` (default): raise an error\n - `False`: pass object through as-is\n\n *Deprecated* (v1.13.0)\n\n Please use `pass_through` instead. 
Note that `strict` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n pass_through: Determine what happens if the object can't be converted to Narwhals\n\n - `False` or `None` (default): raise an error\n - `True`: pass object through as-is\n eager_only: Whether to only allow eager objects\n\n - `False` (default): don't require `native_object` to be eager\n - `True`: only convert to Narwhals if `native_object` is eager\n series_only: Whether to only allow Series\n\n - `False` (default): don't require `native_object` to be a Series\n - `True`: only convert to Narwhals if `native_object` is a Series\n allow_series: Whether to allow Series (default is only Dataframe / Lazyframe)\n\n - `False` or `None` (default): don't convert to Narwhals if `native_object` is a Series\n - `True`: allow `native_object` to be a Series\n\n Returns:\n DataFrame, LazyFrame, Series, or original object, depending\n on which combination of parameters was passed.\n """\n from narwhals._utils import validate_strict_and_pass_though\n\n pass_through = validate_strict_and_pass_though(\n strict, pass_through, pass_through_default=False, emit_deprecation_warning=True\n )\n if kwds:\n msg = f"from_native() got an unexpected keyword argument {next(iter(kwds))!r}"\n raise TypeError(msg)\n\n return _from_native_impl( # type: ignore[no-any-return]\n native_object,\n pass_through=pass_through,\n eager_only=eager_only,\n eager_or_interchange_only=False,\n series_only=series_only,\n allow_series=allow_series,\n version=Version.MAIN,\n )\n\n\ndef _from_native_impl( # noqa: C901, PLR0911, PLR0912, PLR0915\n native_object: Any,\n *,\n pass_through: bool = False,\n eager_only: bool = False,\n # Interchange-level was removed after v1\n eager_or_interchange_only: bool = False,\n series_only: bool = False,\n allow_series: bool | None = None,\n version: Version,\n) -> Any:\n from narwhals._utils import (\n _supports_dataframe_interchange,\n is_compliant_dataframe,\n is_compliant_lazyframe,\n is_compliant_series,\n parse_version,\n )\n from narwhals.dataframe import DataFrame, LazyFrame\n from narwhals.series import Series\n\n # Early returns\n if isinstance(native_object, (DataFrame, LazyFrame)) and not series_only:\n return native_object\n if isinstance(native_object, Series) and (series_only or allow_series):\n return native_object\n\n if series_only:\n if allow_series is False:\n msg = "Invalid parameter combination: `series_only=True` and `allow_series=False`"\n raise ValueError(msg)\n allow_series = True\n if eager_only and eager_or_interchange_only:\n msg = "Invalid parameter combination: `eager_only=True` and `eager_or_interchange_only=True`"\n raise ValueError(msg)\n\n # Extensions\n if is_compliant_dataframe(native_object):\n if series_only:\n if not pass_through:\n msg = "Cannot only use `series_only` with dataframe"\n raise TypeError(msg)\n return native_object\n return version.dataframe(\n native_object.__narwhals_dataframe__()._with_version(version), level="full"\n )\n elif is_compliant_lazyframe(native_object):\n if series_only:\n if not pass_through:\n msg = "Cannot only use `series_only` with lazyframe"\n raise TypeError(msg)\n return native_object\n if eager_only or eager_or_interchange_only:\n if not pass_through:\n msg = "Cannot only use `eager_only` or `eager_or_interchange_only` with lazyframe"\n raise TypeError(msg)\n return native_object\n return version.lazyframe(\n 
native_object.__narwhals_lazyframe__()._with_version(version), level="full"\n )\n elif is_compliant_series(native_object):\n if not allow_series:\n if not pass_through:\n msg = "Please set `allow_series=True` or `series_only=True`"\n raise TypeError(msg)\n return native_object\n return version.series(\n native_object.__narwhals_series__()._with_version(version), level="full"\n )\n\n # Polars\n elif is_native_polars(native_object):\n if series_only and not is_polars_series(native_object):\n if not pass_through:\n msg = f"Cannot only use `series_only` with {type(native_object).__qualname__}"\n raise TypeError(msg)\n return native_object\n if (eager_only or eager_or_interchange_only) and is_polars_lazyframe(\n native_object\n ):\n if not pass_through:\n msg = "Cannot only use `eager_only` or `eager_or_interchange_only` with polars.LazyFrame"\n raise TypeError(msg)\n return native_object\n if (not allow_series) and is_polars_series(native_object):\n if not pass_through:\n msg = "Please set `allow_series=True` or `series_only=True`"\n raise TypeError(msg)\n return native_object\n return (\n version.namespace.from_native_object(native_object)\n .compliant.from_native(native_object)\n .to_narwhals()\n )\n\n # PandasLike\n elif is_native_pandas_like(native_object):\n if is_pandas_like_dataframe(native_object):\n if series_only:\n if not pass_through:\n msg = f"Cannot only use `series_only` with {type(native_object).__qualname__}"\n raise TypeError(msg)\n return native_object\n elif not allow_series:\n if not pass_through:\n msg = "Please set `allow_series=True` or `series_only=True`"\n raise TypeError(msg)\n return native_object\n return (\n version.namespace.from_native_object(native_object)\n .compliant.from_native(native_object)\n .to_narwhals()\n )\n\n # PyArrow\n elif is_native_arrow(native_object):\n if is_pyarrow_table(native_object):\n if series_only:\n if not pass_through:\n msg = f"Cannot only use `series_only` with {type(native_object).__qualname__}"\n raise TypeError(msg)\n return native_object\n elif not allow_series:\n if not pass_through:\n msg = "Please set `allow_series=True` or `series_only=True`"\n raise TypeError(msg)\n return native_object\n return (\n version.namespace.from_native_object(native_object)\n .compliant.from_native(native_object)\n .to_narwhals()\n )\n\n # Dask\n elif is_dask_dataframe(native_object):\n from narwhals._dask.namespace import DaskNamespace\n\n if series_only:\n if not pass_through:\n msg = "Cannot only use `series_only` with dask DataFrame"\n raise TypeError(msg)\n return native_object\n if eager_only or eager_or_interchange_only:\n if not pass_through:\n msg = "Cannot only use `eager_only` or `eager_or_interchange_only` with dask DataFrame"\n raise TypeError(msg)\n return native_object\n dask_version = parse_version(get_dask())\n if dask_version <= (2024, 12, 1) and get_dask_expr() is None: # pragma: no cover\n msg = "Please install dask-expr"\n raise ImportError(msg)\n return (\n DaskNamespace(backend_version=dask_version, version=version)\n .from_native(native_object)\n .to_narwhals()\n )\n\n # DuckDB\n elif is_duckdb_relation(native_object):\n if eager_only or series_only: # pragma: no cover\n if not pass_through:\n msg = "Cannot only use `series_only=True` or `eager_only=True` with DuckDBPyRelation"\n raise TypeError(msg)\n return native_object\n return (\n version.namespace.from_native_object(native_object)\n .compliant.from_native(native_object)\n .to_narwhals()\n )\n\n # Ibis\n elif is_ibis_table(native_object):\n if eager_only or 
series_only: # pragma: no cover\n if not pass_through:\n msg = "Cannot only use `series_only=True` or `eager_only=True` with ibis.Table"\n raise TypeError(msg)\n return native_object\n return (\n version.namespace.from_native_object(native_object)\n .compliant.from_native(native_object)\n .to_narwhals()\n )\n\n # PySpark\n elif is_native_spark_like(native_object): # pragma: no cover\n ns_spark = version.namespace.from_native_object(native_object)\n if series_only or eager_only or eager_or_interchange_only:\n if not pass_through:\n msg = (\n "Cannot only use `series_only`, `eager_only` or `eager_or_interchange_only` "\n f"with {ns_spark.implementation} DataFrame"\n )\n raise TypeError(msg)\n return native_object\n return ns_spark.compliant.from_native(native_object).to_narwhals()\n\n # Interchange protocol\n elif _supports_dataframe_interchange(native_object):\n from narwhals._interchange.dataframe import InterchangeFrame\n\n if eager_only or series_only:\n if not pass_through:\n msg = (\n "Cannot only use `series_only=True` or `eager_only=True` "\n "with object which only implements __dataframe__"\n )\n raise TypeError(msg)\n return native_object\n if version is not Version.V1:\n if pass_through:\n return native_object\n msg = (\n "The Dataframe Interchange Protocol is no longer supported in the main `narwhals` namespace.\n\n"\n "You may want to:\n"\n " - Use `narwhals.stable.v1`, where it is still supported.\n"\n " - See https://narwhals-dev.github.io/narwhals/backcompat\n"\n " - Use `pass_through=True` to pass the object through without raising."\n )\n raise TypeError(msg)\n return Version.V1.dataframe(InterchangeFrame(native_object), level="interchange")\n\n elif not pass_through:\n msg = f"Expected pandas-like dataframe, Polars dataframe, or Polars lazyframe, got: {type(native_object)}"\n raise TypeError(msg)\n return native_object\n\n\ndef get_native_namespace(\n *obj: DataFrame[Any] | LazyFrame[Any] | Series[Any] | IntoFrame | IntoSeries,\n) -> Any:\n """Get native namespace from object.\n\n Arguments:\n obj: Dataframe, Lazyframe, or Series. 
Multiple objects can be\n passed positionally, in which case they must all have the\n same native namespace (else an error is raised).\n\n Returns:\n Native module.\n\n Examples:\n >>> import polars as pl\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df = nw.from_native(pd.DataFrame({"a": [1, 2, 3]}))\n >>> nw.get_native_namespace(df)\n <module 'pandas'...>\n >>> df = nw.from_native(pl.DataFrame({"a": [1, 2, 3]}))\n >>> nw.get_native_namespace(df)\n <module 'polars'...>\n """\n if not obj:\n msg = "At least one object must be passed to `get_native_namespace`."\n raise ValueError(msg)\n result = {_get_native_namespace_single_obj(x) for x in obj}\n if len(result) != 1:\n msg = f"Found objects with different native namespaces: {result}."\n raise ValueError(msg)\n return result.pop()\n\n\ndef _get_native_namespace_single_obj(\n obj: DataFrame[Any] | LazyFrame[Any] | Series[Any] | IntoFrame | IntoSeries,\n) -> Any:\n from contextlib import suppress\n\n from narwhals._utils import has_native_namespace\n\n with suppress(TypeError, AssertionError):\n return Version.MAIN.namespace.from_native_object(\n obj\n ).implementation.to_native_namespace()\n\n if has_native_namespace(obj):\n return obj.__native_namespace__()\n msg = f"Could not get native namespace from object of type: {type(obj)}"\n raise TypeError(msg)\n\n\ndef narwhalify(\n func: Callable[..., Any] | None = None,\n *,\n strict: bool | None = None,\n pass_through: bool | None = None,\n eager_only: bool = False,\n series_only: bool = False,\n allow_series: bool | None = True,\n) -> Callable[..., Any]:\n """Decorate function so it becomes dataframe-agnostic.\n\n This will try to convert any dataframe/series-like object into the Narwhals\n respective DataFrame/Series, while leaving the other parameters as they are.\n Similarly, if the output of the function is a Narwhals DataFrame or Series, it will be\n converted back to the original dataframe/series type, while if the output is another\n type it will be left as is.\n By setting `pass_through=False`, then every input and every output will be required to be a\n dataframe/series-like object.\n\n Arguments:\n func: Function to wrap in a `from_native`-`to_native` block.\n strict: Determine what happens if the object can't be converted to Narwhals\n\n *Deprecated* (v1.13.0)\n\n Please use `pass_through` instead. Note that `strict` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n\n - `True` or `None` (default): raise an error\n - `False`: pass object through as-is\n pass_through: Determine what happens if the object can't be converted to Narwhals\n\n - `False` or `None` (default): raise an error\n - `True`: pass object through as-is\n eager_only: Whether to only allow eager objects\n\n - `False` (default): don't require `native_object` to be eager\n - `True`: only convert to Narwhals if `native_object` is eager\n series_only: Whether to only allow Series\n\n - `False` (default): don't require `native_object` to be a Series\n - `True`: only convert to Narwhals if `native_object` is a Series\n allow_series: Whether to allow Series (default is only Dataframe / Lazyframe)\n\n - `False` or `None`: don't convert to Narwhals if `native_object` is a Series\n - `True` (default): allow `native_object` to be a Series\n\n Returns:\n Decorated function.\n\n Examples:\n Instead of writing\n\n >>> import narwhals as nw\n >>> def agnostic_group_by_sum(df):\n ... 
df = nw.from_native(df, pass_through=True)\n ... df = df.group_by("a").agg(nw.col("b").sum())\n ... return nw.to_native(df)\n\n you can just write\n\n >>> @nw.narwhalify\n ... def agnostic_group_by_sum(df):\n ... return df.group_by("a").agg(nw.col("b").sum())\n """\n from narwhals._utils import validate_strict_and_pass_though\n\n pass_through = validate_strict_and_pass_though(\n strict, pass_through, pass_through_default=True, emit_deprecation_warning=True\n )\n\n def decorator(func: Callable[..., Any]) -> Callable[..., Any]:\n @wraps(func)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n args = [\n from_native(\n arg,\n pass_through=pass_through,\n eager_only=eager_only,\n series_only=series_only,\n allow_series=allow_series,\n )\n for arg in args\n ] # type: ignore[assignment]\n\n kwargs = {\n name: from_native(\n value,\n pass_through=pass_through,\n eager_only=eager_only,\n series_only=series_only,\n allow_series=allow_series,\n )\n for name, value in kwargs.items()\n }\n\n backends = {\n b()\n for v in (*args, *kwargs.values())\n if (b := getattr(v, "__native_namespace__", None))\n }\n\n if len(backends) > 1:\n msg = "Found multiple backends. Make sure that all dataframe/series inputs come from the same backend."\n raise ValueError(msg)\n\n result = func(*args, **kwargs)\n\n return to_native(result, pass_through=pass_through)\n\n return wrapper\n\n if func is None:\n return decorator\n else:\n # If func is not None, it means the decorator is used without arguments\n return decorator(func)\n\n\ndef to_py_scalar(scalar_like: Any) -> Any:\n """If a scalar is not Python native, converts it to Python native.\n\n Arguments:\n scalar_like: Scalar-like value.\n\n Returns:\n Python scalar.\n\n Raises:\n ValueError: If the object is not convertible to a scalar.\n\n Examples:\n >>> import narwhals as nw\n >>> import pandas as pd\n >>> df = nw.from_native(pd.DataFrame({"a": [1, 2, 3]}))\n >>> nw.to_py_scalar(df["a"].item(0))\n 1\n >>> import pyarrow as pa\n >>> df = nw.from_native(pa.table({"a": [1, 2, 3]}))\n >>> nw.to_py_scalar(df["a"].item(0))\n 1\n >>> nw.to_py_scalar(1)\n 1\n """\n scalar: Any\n pd = get_pandas()\n if scalar_like is None or isinstance(scalar_like, NON_TEMPORAL_SCALAR_TYPES):\n scalar = scalar_like\n elif (\n (np := get_numpy())\n and isinstance(scalar_like, np.datetime64)\n and scalar_like.dtype == "datetime64[ns]"\n ):\n ms = scalar_like.item() // MS_PER_SECOND\n scalar = EPOCH + dt.timedelta(microseconds=ms)\n elif is_numpy_scalar(scalar_like) or is_cupy_scalar(scalar_like):\n scalar = scalar_like.item()\n elif pd and isinstance(scalar_like, pd.Timestamp):\n scalar = scalar_like.to_pydatetime()\n elif pd and isinstance(scalar_like, pd.Timedelta):\n scalar = scalar_like.to_pytimedelta()\n # pd.Timestamp and pd.Timedelta subclass datetime and timedelta,\n # so we need to check this separately\n elif isinstance(scalar_like, TEMPORAL_SCALAR_TYPES):\n scalar = scalar_like\n elif _is_pandas_na(scalar_like):\n scalar = None\n elif is_pyarrow_scalar(scalar_like):\n scalar = scalar_like.as_py()\n else:\n msg = (\n f"Expected object convertible to a scalar, found {type(scalar_like)}.\n"\n f"{scalar_like!r}"\n )\n raise ValueError(msg)\n return scalar\n\n\ndef _is_pandas_na(obj: Any) -> bool:\n return bool((pd := get_pandas()) and pd.api.types.is_scalar(obj) and pd.isna(obj))\n\n\n__all__ = ["get_native_namespace", "narwhalify", "to_native", "to_py_scalar"]\n
.venv\Lib\site-packages\narwhals\translate.py
translate.py
Python
27,442
0.95
0.144444
0.058394
react-lib
418
2023-08-21T02:12:20.588215
Apache-2.0
false
97c8d96a7c436864e73f3cb08254526d
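A minimal round-trip sketch tying the functions above together, with pandas as the illustrative backend: `from_native` wraps, `to_native` unwraps, `narwhalify` does both around a function, and `to_py_scalar` normalises backend scalars:

import pandas as pd
import narwhals as nw

@nw.narwhalify
def add_one(df):
    # Inside the decorated function, `df` is a Narwhals DataFrame.
    return df.with_columns(b=nw.col("a") + 1)

native = add_one(pd.DataFrame({"a": [1, 2]}))  # output is pandas again
wrapped = nw.from_native(native)               # explicit wrap
print(type(nw.to_native(wrapped)).__name__)    # DataFrame
print(nw.to_py_scalar(native["b"].iloc[0]))    # 2, as a plain Python int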
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Literal, Protocol, TypeVar, Union\n\nfrom narwhals._compliant import CompliantDataFrame, CompliantLazyFrame, CompliantSeries\n\nif TYPE_CHECKING:\n import datetime as dt\n from collections.abc import Iterable, Sequence, Sized\n from decimal import Decimal\n from types import ModuleType\n\n import numpy as np\n from typing_extensions import TypeAlias\n\n from narwhals import dtypes\n from narwhals.dataframe import DataFrame, LazyFrame\n from narwhals.expr import Expr\n from narwhals.series import Series\n\n # All dataframes supported by Narwhals have a\n # `columns` property. Their similarities don't extend\n # _that_ much further unfortunately...\n class NativeFrame(Protocol):\n @property\n def columns(self) -> Any: ...\n\n def join(self, *args: Any, **kwargs: Any) -> Any: ...\n\n class NativeLazyFrame(NativeFrame, Protocol):\n def explain(self, *args: Any, **kwargs: Any) -> Any: ...\n\n class NativeSeries(Sized, Iterable[Any], Protocol):\n def filter(self, *args: Any, **kwargs: Any) -> Any: ...\n\n class DataFrameLike(Protocol):\n def __dataframe__(self, *args: Any, **kwargs: Any) -> Any: ...\n\n class SupportsNativeNamespace(Protocol):\n def __native_namespace__(self) -> ModuleType: ...\n\n # ruff: noqa: N802\n class DTypes(Protocol):\n @property\n def Decimal(self) -> type[dtypes.Decimal]: ...\n @property\n def Int128(self) -> type[dtypes.Int128]: ...\n @property\n def Int64(self) -> type[dtypes.Int64]: ...\n @property\n def Int32(self) -> type[dtypes.Int32]: ...\n @property\n def Int16(self) -> type[dtypes.Int16]: ...\n @property\n def Int8(self) -> type[dtypes.Int8]: ...\n @property\n def UInt128(self) -> type[dtypes.UInt128]: ...\n @property\n def UInt64(self) -> type[dtypes.UInt64]: ...\n @property\n def UInt32(self) -> type[dtypes.UInt32]: ...\n @property\n def UInt16(self) -> type[dtypes.UInt16]: ...\n @property\n def UInt8(self) -> type[dtypes.UInt8]: ...\n @property\n def Float64(self) -> type[dtypes.Float64]: ...\n @property\n def Float32(self) -> type[dtypes.Float32]: ...\n @property\n def String(self) -> type[dtypes.String]: ...\n @property\n def Boolean(self) -> type[dtypes.Boolean]: ...\n @property\n def Object(self) -> type[dtypes.Object]: ...\n @property\n def Categorical(self) -> type[dtypes.Categorical]: ...\n @property\n def Enum(self) -> type[dtypes.Enum]: ...\n @property\n def Datetime(self) -> type[dtypes.Datetime]: ...\n @property\n def Duration(self) -> type[dtypes.Duration]: ...\n @property\n def Date(self) -> type[dtypes.Date]: ...\n @property\n def Field(self) -> type[dtypes.Field]: ...\n @property\n def Struct(self) -> type[dtypes.Struct]: ...\n @property\n def List(self) -> type[dtypes.List]: ...\n @property\n def Array(self) -> type[dtypes.Array]: ...\n @property\n def Unknown(self) -> type[dtypes.Unknown]: ...\n @property\n def Time(self) -> type[dtypes.Time]: ...\n @property\n def Binary(self) -> type[dtypes.Binary]: ...\n\n\nIntoExpr: TypeAlias = Union["Expr", str, "Series[Any]"]\n"""Anything which can be converted to an expression.\n\nUse this to mean "either a Narwhals expression, or something which can be converted\ninto one". For example, `exprs` in `DataFrame.select` is typed to accept `IntoExpr`,\nas it can either accept a `nw.Expr` (e.g. `df.select(nw.col('a'))`) or a string\nwhich will be interpreted as a `nw.Expr`, e.g. 
`df.select('a')`.\n"""\n\nIntoDataFrame: TypeAlias = Union["NativeFrame", "DataFrameLike"]\n"""Anything which can be converted to a Narwhals DataFrame.\n\nUse this if your function accepts a narwhalifiable object but doesn't care about its backend.\n\nExamples:\n >>> import narwhals as nw\n >>> from narwhals.typing import IntoDataFrame\n >>> def agnostic_shape(df_native: IntoDataFrame) -> tuple[int, int]:\n ... df = nw.from_native(df_native, eager_only=True)\n ... return df.shape\n"""\n\nIntoLazyFrame: TypeAlias = "NativeLazyFrame"\n\nIntoFrame: TypeAlias = Union["IntoDataFrame", "IntoLazyFrame"]\n"""Anything which can be converted to a Narwhals DataFrame or LazyFrame.\n\nUse this if your function can accept an object which can be converted to either\n`nw.DataFrame` or `nw.LazyFrame` and it doesn't care about its backend.\n\nExamples:\n >>> import narwhals as nw\n >>> from narwhals.typing import IntoFrame\n >>> def agnostic_columns(df_native: IntoFrame) -> list[str]:\n ... df = nw.from_native(df_native)\n ... return df.collect_schema().names()\n"""\n\nFrame: TypeAlias = Union["DataFrame[Any]", "LazyFrame[Any]"]\n"""Narwhals DataFrame or Narwhals LazyFrame.\n\nUse this if your function can work with either and your function doesn't care\nabout its backend.\n\nExamples:\n >>> import narwhals as nw\n >>> from narwhals.typing import Frame\n >>> @nw.narwhalify\n ... def agnostic_columns(df: Frame) -> list[str]:\n ... return df.columns\n"""\n\nIntoSeries: TypeAlias = "NativeSeries"\n"""Anything which can be converted to a Narwhals Series.\n\nUse this if your function can accept an object which can be converted to `nw.Series`\nand it doesn't care about its backend.\n\nExamples:\n >>> from typing import Any\n >>> import narwhals as nw\n >>> from narwhals.typing import IntoSeries\n >>> def agnostic_to_list(s_native: IntoSeries) -> list[Any]:\n ... s = nw.from_native(s_native, series_only=True)\n ... return s.to_list()\n"""\n\nIntoFrameT = TypeVar("IntoFrameT", bound="IntoFrame")\n"""TypeVar bound to object convertible to Narwhals DataFrame or Narwhals LazyFrame.\n\nUse this if your function accepts an object which is convertible to `nw.DataFrame`\nor `nw.LazyFrame` and returns an object of the same type.\n\nExamples:\n >>> import narwhals as nw\n >>> from narwhals.typing import IntoFrameT\n >>> def agnostic_func(df_native: IntoFrameT) -> IntoFrameT:\n ... df = nw.from_native(df_native)\n ... return df.with_columns(c=nw.col("a") + 1).to_native()\n"""\n\nIntoDataFrameT = TypeVar("IntoDataFrameT", bound="IntoDataFrame")\n"""TypeVar bound to object convertible to Narwhals DataFrame.\n\nUse this if your function accepts an object which can be converted to `nw.DataFrame`\nand returns an object of the same class.\n\nExamples:\n >>> import narwhals as nw\n >>> from narwhals.typing import IntoDataFrameT\n >>> def agnostic_func(df_native: IntoDataFrameT) -> IntoDataFrameT:\n ... df = nw.from_native(df_native, eager_only=True)\n ... return df.with_columns(c=df["a"] + 1).to_native()\n"""\n\nIntoLazyFrameT = TypeVar("IntoLazyFrameT", bound="IntoLazyFrame")\n\nFrameT = TypeVar("FrameT", "DataFrame[Any]", "LazyFrame[Any]")\n"""TypeVar bound to Narwhals DataFrame or Narwhals LazyFrame.\n\nUse this if your function accepts either `nw.DataFrame` or `nw.LazyFrame` and returns\nan object of the same kind.\n\nExamples:\n >>> import narwhals as nw\n >>> from narwhals.typing import FrameT\n >>> @nw.narwhalify\n ... def agnostic_func(df: FrameT) -> FrameT:\n ... 
return df.with_columns(c=nw.col("a") + 1)\n"""\n\nDataFrameT = TypeVar("DataFrameT", bound="DataFrame[Any]")\n"""TypeVar bound to Narwhals DataFrame.\n\nUse this if your function accepts a Narwhals DataFrame and returns a Narwhals\nDataFrame backed by the same backend.\n\nExamples:\n >>> import narwhals as nw\n >>> from narwhals.typing import DataFrameT\n >>> @nw.narwhalify\n ... def func(df: DataFrameT) -> DataFrameT:\n ... return df.with_columns(c=df["a"] + 1)\n"""\n\nLazyFrameT = TypeVar("LazyFrameT", bound="LazyFrame[Any]")\nSeriesT = TypeVar("SeriesT", bound="Series[Any]")\n\nIntoSeriesT = TypeVar("IntoSeriesT", bound="IntoSeries")\n"""TypeVar bound to object convertible to Narwhals Series.\n\nUse this if your function accepts an object which can be converted to `nw.Series`\nand returns an object of the same class.\n\nExamples:\n >>> import narwhals as nw\n >>> from narwhals.typing import IntoSeriesT\n >>> def agnostic_abs(s_native: IntoSeriesT) -> IntoSeriesT:\n ... s = nw.from_native(s_native, series_only=True)\n ... return s.abs().to_native()\n"""\n\nDTypeBackend: TypeAlias = 'Literal["pyarrow", "numpy_nullable"] | None'\nSizeUnit: TypeAlias = Literal[\n "b",\n "kb",\n "mb",\n "gb",\n "tb",\n "bytes",\n "kilobytes",\n "megabytes",\n "gigabytes",\n "terabytes",\n]\n\nTimeUnit: TypeAlias = Literal["ns", "us", "ms", "s"]\n\nAsofJoinStrategy: TypeAlias = Literal["backward", "forward", "nearest"]\n"""Join strategy.\n\n- *"backward"*: Selects the last row in the right DataFrame whose `on` key\n is less than or equal to the left's key.\n- *"forward"*: Selects the first row in the right DataFrame whose `on` key\n is greater than or equal to the left's key.\n- *"nearest"*: Selects the last row in the right DataFrame whose value\n is nearest to the left's key.\n"""\n\nClosedInterval: TypeAlias = Literal["left", "right", "none", "both"]\n"""Define which sides of the interval are closed (inclusive)."""\n\nConcatMethod: TypeAlias = Literal["horizontal", "vertical", "diagonal"]\n"""Concatenating strategy.\n\n- *"vertical"*: Concatenate vertically. Column names must match.\n- *"horizontal"*: Concatenate horizontally. If lengths don't match, then\n missing rows are filled with null values.\n- *"diagonal"*: Finds a union between the column schemas and fills missing\n column values with null.\n"""\n\nFillNullStrategy: TypeAlias = Literal["forward", "backward"]\n"""Strategy used to fill null values."""\n\nJoinStrategy: TypeAlias = Literal["inner", "left", "full", "cross", "semi", "anti"]\n"""Join strategy.\n\n- *"inner"*: Returns rows that have matching values in both tables.\n- *"left"*: Returns all rows from the left table, and the matched rows from\n the right table.\n- *"full"*: Returns all rows in both dataframes, with the `suffix` appended to\n the right join keys.\n- *"cross"*: Returns the Cartesian product of rows from both tables.\n- *"semi"*: Filter rows that have a match in the right table.\n- *"anti"*: Filter rows that do not have a match in the right table.\n"""\n\nPivotAgg: TypeAlias = Literal[\n "min", "max", "first", "last", "sum", "mean", "median", "len"\n]\n"""A predefined aggregate function string."""\n\nRankMethod: TypeAlias = Literal["average", "min", "max", "dense", "ordinal"]\n"""The method used to assign ranks to tied elements.\n\n- *"average"*: The average of the ranks that would have been assigned to\n all the tied values is assigned to each value.\n- *"min"*: The minimum of the ranks that would have been assigned to all\n the tied values is assigned to each value. 
(This is also referred to\n as "competition" ranking.)\n- *"max"*: The maximum of the ranks that would have been assigned to all\n the tied values is assigned to each value.\n- *"dense"*: Like "min", but the rank of the next highest element is\n assigned the rank immediately after those assigned to the tied elements.\n- *"ordinal"*: All values are given a distinct rank, corresponding to the\n order that the values occur in the Series.\n"""\n\nRollingInterpolationMethod: TypeAlias = Literal[\n "nearest", "higher", "lower", "midpoint", "linear"\n]\n"""Interpolation method."""\n\nUniqueKeepStrategy: TypeAlias = Literal["any", "first", "last", "none"]\n"""Which of the duplicate rows to keep.\n\n- *"any"*: Does not give any guarantee of which row is kept.\n This allows more optimizations.\n- *"none"*: Don't keep duplicate rows.\n- *"first"*: Keep first unique row.\n- *"last"*: Keep last unique row.\n"""\n\nLazyUniqueKeepStrategy: TypeAlias = Literal["any", "none"]\n"""Which of the duplicate rows to keep.\n\n- *"any"*: Does not give any guarantee of which row is kept.\n- *"none"*: Don't keep duplicate rows.\n"""\n\n\n_ShapeT = TypeVar("_ShapeT", bound="tuple[int, ...]")\n_NDArray: TypeAlias = "np.ndarray[_ShapeT, Any]"\n_1DArray: TypeAlias = "_NDArray[tuple[int]]" # noqa: PYI042\n_1DArrayInt: TypeAlias = "np.ndarray[tuple[int], np.dtype[np.integer[Any]]]" # noqa: PYI042\n_2DArray: TypeAlias = "_NDArray[tuple[int, int]]" # noqa: PYI042, PYI047\n_AnyDArray: TypeAlias = "_NDArray[tuple[int, ...]]" # noqa: PYI047\n_NumpyScalar: TypeAlias = "np.generic[Any]"\nInto1DArray: TypeAlias = "_1DArray | _NumpyScalar"\n"""A 1-dimensional `numpy.ndarray` or scalar that can be converted into one."""\n\n\nNumericLiteral: TypeAlias = "int | float | Decimal"\nTemporalLiteral: TypeAlias = "dt.date | dt.datetime | dt.time | dt.timedelta"\nNonNestedLiteral: TypeAlias = (\n "NumericLiteral | TemporalLiteral | str | bool | bytes | None"\n)\nPythonLiteral: TypeAlias = "NonNestedLiteral | list[Any] | tuple[Any, ...]"\n\nNonNestedDType: TypeAlias = "dtypes.NumericType | dtypes.TemporalType | dtypes.String | dtypes.Boolean | dtypes.Binary | dtypes.Categorical | dtypes.Unknown | dtypes.Object"\n"""Any Narwhals DType that does not have required arguments."""\n\nIntoDType: TypeAlias = "dtypes.DType | type[NonNestedDType]"\n"""Anything that can be converted into a Narwhals DType.\n\nExamples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})\n >>> df = nw.from_native(df_native)\n >>> df.select(\n ... nw.col("a").cast(nw.Int32),\n ... nw.col("b").cast(nw.String()).str.split(".").cast(nw.List(nw.Int8)),\n ... )\n ┌──────────────────┐\n |Narwhals DataFrame|\n |------------------|\n |shape: (3, 2) |\n |┌─────┬──────────┐|\n |│ a ┆ b │|\n |│ --- ┆ --- │|\n |│ i32 ┆ list[i8] │|\n |╞═════╪══════════╡|\n |│ 1 ┆ [4, 0] │|\n |│ 2 ┆ [5, 0] │|\n |│ 3 ┆ [6, 0] │|\n |└─────┴──────────┘|\n └──────────────────┘\n"""\n\n\n# Annotations for `__getitem__` methods\n_T = TypeVar("_T")\n_Slice: TypeAlias = "slice[_T, Any, Any] | slice[Any, _T, Any] | slice[None, None, _T]"\n_SliceNone: TypeAlias = "slice[None, None, None]"\n# Index/column positions\nSingleIndexSelector: TypeAlias = int\n_SliceIndex: TypeAlias = "_Slice[int] | _SliceNone"\n"""E.g. 
`[1:]` or `[:3]` or `[::2]`."""\nSizedMultiIndexSelector: TypeAlias = "Sequence[int] | _T | _1DArrayInt"\nMultiIndexSelector: TypeAlias = "_SliceIndex | SizedMultiIndexSelector[_T]"\n# Labels/column names\nSingleNameSelector: TypeAlias = str\n_SliceName: TypeAlias = "_Slice[str] | _SliceNone"\nSizedMultiNameSelector: TypeAlias = "Sequence[str] | _T | _1DArray"\nMultiNameSelector: TypeAlias = "_SliceName | SizedMultiNameSelector[_T]"\n# Mixed selectors\nSingleColSelector: TypeAlias = "SingleIndexSelector | SingleNameSelector"\nMultiColSelector: TypeAlias = "MultiIndexSelector[_T] | MultiNameSelector[_T]"\n\n\n__all__ = [\n "CompliantDataFrame",\n "CompliantLazyFrame",\n "CompliantSeries",\n "DataFrameT",\n "Frame",\n "FrameT",\n "IntoDataFrame",\n "IntoDataFrameT",\n "IntoExpr",\n "IntoFrame",\n "IntoFrameT",\n "IntoSeries",\n "IntoSeriesT",\n]\n
.venv\Lib\site-packages\narwhals\typing.py
typing.py
Python
15,334
0.95
0.170561
0.022599
python-kit
651
2025-06-20T16:33:21.980570
MIT
false
5ba61349ef8a66231e4b96db7b5e3ecf
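A sketch of the central pattern these aliases enable: annotating with `IntoFrameT` advertises that the function hands back the same native type it received (pandas below is illustrative):

from __future__ import annotations

import pandas as pd
import narwhals as nw
from narwhals.typing import IntoFrameT


def with_total(df_native: IntoFrameT) -> IntoFrameT:
    # Wrap, transform agnostically, then unwrap to the caller's native type.
    df = nw.from_native(df_native)
    return df.with_columns(total=nw.col("a") + nw.col("b")).to_native()


print(with_total(pd.DataFrame({"a": [1, 2], "b": [10, 20]})))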
# Re-export some functions from `_utils` to make them public.\nfrom __future__ import annotations\n\nfrom narwhals._utils import Implementation, Version, parse_version\n\n__all__ = ["Implementation", "Version", "parse_version"]\n
.venv\Lib\site-packages\narwhals\utils.py
utils.py
Python
223
0.95
0
0.25
awesome-app
847
2025-05-09T16:25:24.096477
BSD-3-Clause
false
cff1beca8cf9c2ec3d2f6cfae800ef4b
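A small usage sketch of the re-exported `parse_version`, which, as `translate.py` above shows with `parse_version(get_dask())`, accepts a module and returns a comparable version tuple:

import pandas as pd
from narwhals.utils import parse_version

# Version gates compare tuples, mirroring the dask check in translate.py.
if parse_version(pd) >= (2, 0):
    print("running on pandas 2.x or newer")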
from __future__ import annotations\n\nimport datetime as dt\n\n# Temporal (from `polars._utils.constants`)\nSECONDS_PER_DAY = 86_400\nSECONDS_PER_MINUTE = 60\nNS_PER_MINUTE = 60_000_000_000\n"""Nanoseconds (`[ns]`) per minute."""\nUS_PER_MINUTE = 60_000_000\n"""Microseconds (`[μs]`) per minute."""\nMS_PER_MINUTE = 60_000\n"""Milliseconds (`[ms]`) per minute."""\nNS_PER_SECOND = 1_000_000_000\n"""Nanoseconds (`[ns]`) per second (`[s]`)."""\nUS_PER_SECOND = 1_000_000\n"""Microseconds (`[μs]`) per second (`[s]`)."""\nMS_PER_SECOND = 1_000\n"""Milliseconds (`[ms]`) per second (`[s]`)."""\nNS_PER_MICROSECOND = 1_000\n"""Nanoseconds (`[ns]`) per microsecond (`[μs]`)."""\nNS_PER_MILLISECOND = 1_000_000\n"""Nanoseconds (`[ns]`) per millisecond (`[ms]`).\n\nFrom [polars](https://github.com/pola-rs/polars/blob/2c7a3e77f0faa37c86a3745db4ef7707ae50c72e/crates/polars-time/src/chunkedarray/duration.rs#L7).\n"""\nEPOCH_YEAR = 1970\n"""See [Unix time](https://en.wikipedia.org/wiki/Unix_time)."""\nEPOCH = dt.datetime(EPOCH_YEAR, 1, 1).replace(tzinfo=None)\n"""See [Unix time](https://en.wikipedia.org/wiki/Unix_time)."""\n
.venv\Lib\site-packages\narwhals\_constants.py
_constants.py
Python
1,094
0.95
0
0.037037
vue-tools
999
2024-04-12T09:03:49.927804
MIT
false
538a7fff8771a26a6f828a8fd19a1078
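# A small worked example (not from the source) showing how the conversion
# constants compose: turning a `timedelta` into integer nanoseconds. The
# sub-second `microseconds` component is ignored here for brevity.
import datetime as dt

from narwhals._constants import NS_PER_SECOND, SECONDS_PER_DAY

delta = dt.timedelta(days=1, seconds=2)
total_ns = (delta.days * SECONDS_PER_DAY + delta.seconds) * NS_PER_SECOND
assert total_ns == (86_400 + 2) * 1_000_000_000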
"""Tools for working with the Polars duration string language."""\n\nfrom __future__ import annotations\n\nimport re\nfrom typing import TYPE_CHECKING, Literal, cast, get_args\n\nif TYPE_CHECKING:\n from typing_extensions import TypeAlias\n\n__all__ = ["IntervalUnit", "parse_interval_string"]\n\nIntervalUnit: TypeAlias = Literal["ns", "us", "ms", "s", "m", "h", "d", "mo", "q", "y"]\n"""A Polars duration string interval unit.\n\n- 'ns': nanosecond.\n- 'us': microsecond.\n- 'ms': millisecond.\n- 's': second.\n- 'm': minute.\n- 'h': hour.\n- 'd': day.\n- 'mo': month.\n- 'q': quarter.\n- 'y': year.\n"""\n\nPATTERN_INTERVAL: re.Pattern[str] = re.compile(\n r"^(?P<multiple>\d+)(?P<unit>ns|us|ms|mo|m|s|h|d|q|y)\Z"\n)\nMONTH_MULTIPLES = frozenset([1, 2, 3, 4, 6, 12])\nQUARTER_MULTIPLES = frozenset([1, 2, 4])\n\n\ndef parse_interval_string(every: str) -> tuple[int, IntervalUnit]:\n """Parse a string like "1d", "2h", "3m" into a tuple of (number, unit).\n\n Returns:\n A tuple of multiple and unit parsed from the interval string.\n """\n if match := PATTERN_INTERVAL.match(every):\n multiple = int(match["multiple"])\n unit = cast("IntervalUnit", match["unit"])\n if unit == "mo" and multiple not in MONTH_MULTIPLES:\n msg = f"Only the following multiples are supported for 'mo' unit: {MONTH_MULTIPLES}.\nGot: {multiple}."\n raise ValueError(msg)\n if unit == "q" and multiple not in QUARTER_MULTIPLES:\n msg = f"Only the following multiples are supported for 'q' unit: {QUARTER_MULTIPLES}.\nGot: {multiple}."\n raise ValueError(msg)\n if unit == "y" and multiple != 1:\n msg = (\n f"Only multiple 1 is currently supported for 'y' unit.\nGot: {multiple}."\n )\n raise ValueError(msg)\n return multiple, unit\n msg = (\n f"Invalid `every` string: {every}. Expected string of kind <number><unit>, "\n f"where 'unit' is one of: {get_args(IntervalUnit)}."\n )\n raise ValueError(msg)\n
.venv\Lib\site-packages\narwhals\_duration.py
_duration.py
Python
2,008
0.85
0.166667
0
node-utils
957
2024-02-05T15:43:07.980197
BSD-3-Clause
false
b0a1798668ec8cebc43b804bdb68fd53
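# Usage sketch for `parse_interval_string`: valid interval strings round-trip
# to a (multiple, unit) tuple, while a month multiple outside MONTH_MULTIPLES
# raises the ValueError constructed above.
from narwhals._duration import parse_interval_string

assert parse_interval_string("1d") == (1, "d")
assert parse_interval_string("15m") == (15, "m")  # "m" is minutes, "mo" is months
try:
    parse_interval_string("5mo")  # 5 not in {1, 2, 3, 4, 6, 12}
except ValueError as exc:
    print(exc)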
from __future__ import annotations\n\n# ruff: noqa: ARG004\nfrom enum import Enum\nfrom typing import Any\n\n\nclass NoAutoEnum(Enum):\n """Enum base class that prohibits the use of enum.auto() for value assignment.\n\n This behavior is achieved by overriding the value generation mechanism.\n\n Examples:\n >>> from enum import auto\n >>> from narwhals._enum import NoAutoEnum\n >>>\n >>> class Colors(NoAutoEnum):\n ... RED = 1\n ... GREEN = 2\n >>> Colors.RED\n <Colors.RED: 1>\n\n >>> class ColorsWithAuto(NoAutoEnum):\n ... RED = 1\n ... GREEN = auto()\n Traceback (most recent call last):\n ...\n ValueError: Creating values with `auto()` is not allowed. Please provide a value manually instead.\n\n Raises:\n ValueError: If `auto()` is used for any enum member value.\n """\n\n @staticmethod\n def _generate_next_value_(\n name: str, start: int, count: int, last_values: list[Any]\n ) -> Any:\n msg = "Creating values with `auto()` is not allowed. Please provide a value manually instead."\n raise ValueError(msg)\n\n\n__all__ = ["NoAutoEnum"]\n
.venv\Lib\site-packages\narwhals\_enum.py
_enum.py
Python
1,192
0.95
0.166667
0.03125
python-kit
624
2024-10-16T14:31:13.482625
MIT
false
9067f67d8120be6dcd59b418c69822af
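# Why overriding `_generate_next_value_` suffices: `enum.auto()` resolves its
# value by calling that hook during class creation, so raising there rejects
# `auto()` while explicitly assigned values never trigger it. A minimal demo:
from enum import auto

from narwhals._enum import NoAutoEnum


class Unit(NoAutoEnum):
    SECOND = "s"
    MINUTE = "m"


print(Unit.SECOND)  # Unit.SECOND

try:

    class Bad(NoAutoEnum):
        FIRST = auto()

except ValueError as exc:
    print(exc)  # Creating values with `auto()` is not allowed. ...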
# Utilities for expression parsing\n# Useful for backends which don't have any concept of expressions, such\n# as pandas or PyArrow.\nfrom __future__ import annotations\n\nfrom enum import Enum, auto\nfrom itertools import chain\nfrom typing import TYPE_CHECKING, Any, Literal, TypeVar, cast\n\nfrom narwhals._utils import is_compliant_expr\nfrom narwhals.dependencies import is_narwhals_series, is_numpy_array\nfrom narwhals.exceptions import (\n InvalidOperationError,\n LengthChangingExprError,\n MultiOutputExpressionError,\n ShapeError,\n)\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n from typing_extensions import Never, TypeIs\n\n from narwhals._compliant import CompliantExpr, CompliantFrameT\n from narwhals._compliant.typing import (\n AliasNames,\n CompliantExprAny,\n CompliantFrameAny,\n CompliantNamespaceAny,\n EagerNamespaceAny,\n EvalNames,\n )\n from narwhals.expr import Expr\n from narwhals.series import Series\n from narwhals.typing import IntoExpr, NonNestedLiteral, _1DArray\n\n T = TypeVar("T")\n\n\ndef is_expr(obj: Any) -> TypeIs[Expr]:\n """Check whether `obj` is a Narwhals Expr."""\n from narwhals.expr import Expr\n\n return isinstance(obj, Expr)\n\n\ndef is_series(obj: Any) -> TypeIs[Series[Any]]:\n """Check whether `obj` is a Narwhals Series."""\n from narwhals.series import Series\n\n return isinstance(obj, Series)\n\n\ndef combine_evaluate_output_names(\n *exprs: CompliantExpr[CompliantFrameT, Any],\n) -> EvalNames[CompliantFrameT]:\n # Follow left-hand-rule for naming. E.g. `nw.sum_horizontal(expr1, expr2)` takes the\n # first name of `expr1`.\n if not is_compliant_expr(exprs[0]): # pragma: no cover\n msg = f"Safety assertion failed, expected expression, got: {type(exprs[0])}. Please report a bug."\n raise AssertionError(msg)\n\n def evaluate_output_names(df: CompliantFrameT) -> Sequence[str]:\n return exprs[0]._evaluate_output_names(df)[:1]\n\n return evaluate_output_names\n\n\ndef combine_alias_output_names(*exprs: CompliantExprAny) -> AliasNames | None:\n # Follow left-hand-rule for naming. E.g. 
`nw.sum_horizontal(expr1.alias(alias), expr2)` takes the\n # aliasing function of `expr1` and apply it to the first output name of `expr1`.\n if exprs[0]._alias_output_names is None:\n return None\n\n def alias_output_names(names: Sequence[str]) -> Sequence[str]:\n return exprs[0]._alias_output_names(names)[:1] # type: ignore[misc]\n\n return alias_output_names\n\n\ndef extract_compliant(\n plx: CompliantNamespaceAny,\n other: IntoExpr | NonNestedLiteral | _1DArray,\n *,\n str_as_lit: bool,\n) -> CompliantExprAny | NonNestedLiteral:\n if is_expr(other):\n return other._to_compliant_expr(plx)\n if isinstance(other, str) and not str_as_lit:\n return plx.col(other)\n if is_narwhals_series(other):\n return other._compliant_series._to_expr()\n if is_numpy_array(other):\n ns = cast("EagerNamespaceAny", plx)\n return ns._series.from_numpy(other, context=ns)._to_expr()\n return other\n\n\ndef evaluate_output_names_and_aliases(\n expr: CompliantExprAny, df: CompliantFrameAny, exclude: Sequence[str]\n) -> tuple[Sequence[str], Sequence[str]]:\n output_names = expr._evaluate_output_names(df)\n aliases = (\n output_names\n if expr._alias_output_names is None\n else expr._alias_output_names(output_names)\n )\n if exclude:\n assert expr._metadata is not None # noqa: S101\n if expr._metadata.expansion_kind.is_multi_unnamed():\n output_names, aliases = zip(\n *[\n (x, alias)\n for x, alias in zip(output_names, aliases)\n if x not in exclude\n ]\n )\n return output_names, aliases\n\n\nclass ExprKind(Enum):\n """Describe which kind of expression we are dealing with."""\n\n LITERAL = auto()\n """e.g. `nw.lit(1)`"""\n\n AGGREGATION = auto()\n """Reduces to a single value, not affected by row order, e.g. `nw.col('a').mean()`"""\n\n ORDERABLE_AGGREGATION = auto()\n """Reduces to a single value, affected by row order, e.g. `nw.col('a').arg_max()`"""\n\n ELEMENTWISE = auto()\n """Preserves length, can operate without context for surrounding rows, e.g. `nw.col('a').abs()`."""\n\n ORDERABLE_WINDOW = auto()\n """Depends on the rows around it and on their order, e.g. `diff`."""\n\n UNORDERABLE_WINDOW = auto()\n """Depends on the rows around it but not on their order, e.g. `rank`."""\n\n FILTRATION = auto()\n """Changes length, not affected by row order, e.g. `drop_nulls`."""\n\n ORDERABLE_FILTRATION = auto()\n """Changes length, affected by row order, e.g. 
`tail`."""\n\n NARY = auto()\n """Results from the combination of multiple expressions."""\n\n OVER = auto()\n """Results from calling `.over` on expression."""\n\n UNKNOWN = auto()\n """Based on the information we have, we can't determine the ExprKind."""\n\n @property\n def is_scalar_like(self) -> bool:\n return self in {ExprKind.LITERAL, ExprKind.AGGREGATION}\n\n @property\n def is_orderable_window(self) -> bool:\n return self in {ExprKind.ORDERABLE_WINDOW, ExprKind.ORDERABLE_AGGREGATION}\n\n @classmethod\n def from_expr(cls, obj: Expr) -> ExprKind:\n meta = obj._metadata\n if meta.is_literal:\n return ExprKind.LITERAL\n if meta.is_scalar_like:\n return ExprKind.AGGREGATION\n if meta.is_elementwise:\n return ExprKind.ELEMENTWISE\n return ExprKind.UNKNOWN\n\n @classmethod\n def from_into_expr(\n cls, obj: IntoExpr | NonNestedLiteral | _1DArray, *, str_as_lit: bool\n ) -> ExprKind:\n if is_expr(obj):\n return cls.from_expr(obj)\n if (\n is_narwhals_series(obj)\n or is_numpy_array(obj)\n or (isinstance(obj, str) and not str_as_lit)\n ):\n return ExprKind.ELEMENTWISE\n return ExprKind.LITERAL\n\n\ndef is_scalar_like(\n obj: ExprKind,\n) -> TypeIs[Literal[ExprKind.LITERAL, ExprKind.AGGREGATION]]:\n return obj.is_scalar_like\n\n\nclass ExpansionKind(Enum):\n """Describe what kind of expansion the expression performs."""\n\n SINGLE = auto()\n """e.g. `nw.col('a'), nw.sum_horizontal(nw.all())`"""\n\n MULTI_NAMED = auto()\n """e.g. `nw.col('a', 'b')`"""\n\n MULTI_UNNAMED = auto()\n """e.g. `nw.all()`, nw.nth(0, 1)"""\n\n def is_multi_unnamed(self) -> bool:\n return self is ExpansionKind.MULTI_UNNAMED\n\n def is_multi_output(self) -> bool:\n return self in {ExpansionKind.MULTI_NAMED, ExpansionKind.MULTI_UNNAMED}\n\n def __and__(self, other: ExpansionKind) -> Literal[ExpansionKind.MULTI_UNNAMED]:\n if self is ExpansionKind.MULTI_UNNAMED and other is ExpansionKind.MULTI_UNNAMED:\n # e.g. nw.selectors.all() - nw.selectors.numeric().\n return ExpansionKind.MULTI_UNNAMED\n # Don't attempt anything more complex, keep it simple and raise in the face of ambiguity.\n msg = f"Unsupported ExpansionKind combination, got {self} and {other}, please report a bug." 
# pragma: no cover\n raise AssertionError(msg) # pragma: no cover\n\n\nclass ExprMetadata:\n __slots__ = (\n "expansion_kind",\n "has_windows",\n "is_elementwise",\n "is_literal",\n "is_scalar_like",\n "last_node",\n "n_orderable_ops",\n "preserves_length",\n )\n\n def __init__(\n self,\n expansion_kind: ExpansionKind,\n last_node: ExprKind,\n *,\n has_windows: bool = False,\n n_orderable_ops: int = 0,\n preserves_length: bool = True,\n is_elementwise: bool = True,\n is_scalar_like: bool = False,\n is_literal: bool = False,\n ) -> None:\n if is_literal:\n assert is_scalar_like # noqa: S101 # debug assertion\n if is_elementwise:\n assert preserves_length # noqa: S101 # debug assertion\n self.expansion_kind: ExpansionKind = expansion_kind\n self.last_node: ExprKind = last_node\n self.has_windows: bool = has_windows\n self.n_orderable_ops: int = n_orderable_ops\n self.is_elementwise: bool = is_elementwise\n self.preserves_length: bool = preserves_length\n self.is_scalar_like: bool = is_scalar_like\n self.is_literal: bool = is_literal\n\n def __init_subclass__(cls, /, *args: Any, **kwds: Any) -> Never: # pragma: no cover\n msg = f"Cannot subclass {cls.__name__!r}"\n raise TypeError(msg)\n\n def __repr__(self) -> str: # pragma: no cover\n return (\n f"ExprMetadata(\n"\n f" expansion_kind: {self.expansion_kind},\n"\n f" last_node: {self.last_node},\n"\n f" has_windows: {self.has_windows},\n"\n f" n_orderable_ops: {self.n_orderable_ops},\n"\n f" is_elementwise: {self.is_elementwise},\n"\n f" preserves_length: {self.preserves_length},\n"\n f" is_scalar_like: {self.is_scalar_like},\n"\n f" is_literal: {self.is_literal},\n"\n ")"\n )\n\n @property\n def is_filtration(self) -> bool:\n return not self.preserves_length and not self.is_scalar_like\n\n def with_aggregation(self) -> ExprMetadata:\n if self.is_scalar_like:\n msg = "Can't apply aggregations to scalar-like expressions."\n raise InvalidOperationError(msg)\n return ExprMetadata(\n self.expansion_kind,\n ExprKind.AGGREGATION,\n has_windows=self.has_windows,\n n_orderable_ops=self.n_orderable_ops,\n preserves_length=False,\n is_elementwise=False,\n is_scalar_like=True,\n is_literal=False,\n )\n\n def with_orderable_aggregation(self) -> ExprMetadata:\n if self.is_scalar_like:\n msg = "Can't apply aggregations to scalar-like expressions."\n raise InvalidOperationError(msg)\n return ExprMetadata(\n self.expansion_kind,\n ExprKind.ORDERABLE_AGGREGATION,\n has_windows=self.has_windows,\n n_orderable_ops=self.n_orderable_ops + 1,\n preserves_length=False,\n is_elementwise=False,\n is_scalar_like=True,\n is_literal=False,\n )\n\n def with_elementwise_op(self) -> ExprMetadata:\n return ExprMetadata(\n self.expansion_kind,\n ExprKind.ELEMENTWISE,\n has_windows=self.has_windows,\n n_orderable_ops=self.n_orderable_ops,\n preserves_length=self.preserves_length,\n is_elementwise=self.is_elementwise,\n is_scalar_like=self.is_scalar_like,\n is_literal=self.is_literal,\n )\n\n def with_unorderable_window(self) -> ExprMetadata:\n if self.is_scalar_like:\n msg = "Can't apply unorderable window (`rank`, `is_unique`) to scalar-like expression."\n raise InvalidOperationError(msg)\n return ExprMetadata(\n self.expansion_kind,\n ExprKind.UNORDERABLE_WINDOW,\n has_windows=self.has_windows,\n n_orderable_ops=self.n_orderable_ops,\n preserves_length=self.preserves_length,\n is_elementwise=False,\n is_scalar_like=False,\n is_literal=False,\n )\n\n def with_orderable_window(self) -> ExprMetadata:\n if self.is_scalar_like:\n msg = "Can't apply orderable window (e.g. 
`diff`, `shift`) to scalar-like expression."\n raise InvalidOperationError(msg)\n return ExprMetadata(\n self.expansion_kind,\n ExprKind.ORDERABLE_WINDOW,\n has_windows=self.has_windows,\n n_orderable_ops=self.n_orderable_ops + 1,\n preserves_length=self.preserves_length,\n is_elementwise=False,\n is_scalar_like=False,\n is_literal=False,\n )\n\n def with_ordered_over(self) -> ExprMetadata:\n if self.has_windows:\n msg = "Cannot nest `over` statements."\n raise InvalidOperationError(msg)\n if self.is_elementwise or self.is_filtration:\n msg = (\n "Cannot use `over` on expressions which are elementwise\n"\n "(e.g. `abs`) or which change length (e.g. `drop_nulls`)."\n )\n raise InvalidOperationError(msg)\n n_orderable_ops = self.n_orderable_ops\n if not n_orderable_ops:\n msg = "Cannot use `order_by` in `over` on expression which isn't orderable."\n raise InvalidOperationError(msg)\n if self.last_node.is_orderable_window:\n n_orderable_ops -= 1\n return ExprMetadata(\n self.expansion_kind,\n ExprKind.OVER,\n has_windows=True,\n n_orderable_ops=n_orderable_ops,\n preserves_length=True,\n is_elementwise=False,\n is_scalar_like=False,\n is_literal=False,\n )\n\n def with_partitioned_over(self) -> ExprMetadata:\n if self.has_windows:\n msg = "Cannot nest `over` statements."\n raise InvalidOperationError(msg)\n if self.is_elementwise or self.is_filtration:\n msg = (\n "Cannot use `over` on expressions which are elementwise\n"\n "(e.g. `abs`) or which change length (e.g. `drop_nulls`)."\n )\n raise InvalidOperationError(msg)\n return ExprMetadata(\n self.expansion_kind,\n ExprKind.OVER,\n has_windows=True,\n n_orderable_ops=self.n_orderable_ops,\n preserves_length=True,\n is_elementwise=False,\n is_scalar_like=False,\n is_literal=False,\n )\n\n def with_filtration(self) -> ExprMetadata:\n if self.is_scalar_like:\n msg = "Can't apply filtration (e.g. `drop_nulls`) to scalar-like expression."\n raise InvalidOperationError(msg)\n return ExprMetadata(\n self.expansion_kind,\n ExprKind.FILTRATION,\n has_windows=self.has_windows,\n n_orderable_ops=self.n_orderable_ops,\n preserves_length=False,\n is_elementwise=False,\n is_scalar_like=False,\n is_literal=False,\n )\n\n def with_orderable_filtration(self) -> ExprMetadata:\n if self.is_scalar_like:\n msg = "Can't apply filtration (e.g. `drop_nulls`) to scalar-like expression."\n raise InvalidOperationError(msg)\n return ExprMetadata(\n self.expansion_kind,\n ExprKind.ORDERABLE_FILTRATION,\n has_windows=self.has_windows,\n n_orderable_ops=self.n_orderable_ops + 1,\n preserves_length=False,\n is_elementwise=False,\n is_scalar_like=False,\n is_literal=False,\n )\n\n @staticmethod\n def aggregation() -> ExprMetadata:\n return ExprMetadata(\n ExpansionKind.SINGLE,\n ExprKind.AGGREGATION,\n is_elementwise=False,\n preserves_length=False,\n is_scalar_like=True,\n )\n\n @staticmethod\n def literal() -> ExprMetadata:\n return ExprMetadata(\n ExpansionKind.SINGLE,\n ExprKind.LITERAL,\n is_elementwise=False,\n preserves_length=False,\n is_literal=True,\n is_scalar_like=True,\n )\n\n @staticmethod\n def selector_single() -> ExprMetadata:\n # e.g. `nw.col('a')`, `nw.nth(0)`\n return ExprMetadata(ExpansionKind.SINGLE, ExprKind.ELEMENTWISE)\n\n @staticmethod\n def selector_multi_named() -> ExprMetadata:\n # e.g. `nw.col('a', 'b')`\n return ExprMetadata(ExpansionKind.MULTI_NAMED, ExprKind.ELEMENTWISE)\n\n @staticmethod\n def selector_multi_unnamed() -> ExprMetadata:\n # e.g. 
`nw.all()`\n return ExprMetadata(ExpansionKind.MULTI_UNNAMED, ExprKind.ELEMENTWISE)\n\n @classmethod\n def from_binary_op(cls, lhs: Expr, rhs: IntoExpr, /) -> ExprMetadata:\n # We may be able to allow multi-output rhs in the future:\n # https://github.com/narwhals-dev/narwhals/issues/2244.\n return combine_metadata(\n lhs, rhs, str_as_lit=True, allow_multi_output=False, to_single_output=False\n )\n\n @classmethod\n def from_horizontal_op(cls, *exprs: IntoExpr) -> ExprMetadata:\n return combine_metadata(\n *exprs, str_as_lit=False, allow_multi_output=True, to_single_output=True\n )\n\n\ndef combine_metadata( # noqa: C901, PLR0912\n *args: IntoExpr | object | None,\n str_as_lit: bool,\n allow_multi_output: bool,\n to_single_output: bool,\n) -> ExprMetadata:\n """Combine metadata from `args`.\n\n Arguments:\n args: Arguments, maybe expressions, literals, or Series.\n str_as_lit: Whether to interpret strings as literals or as column names.\n allow_multi_output: Whether to allow multi-output inputs.\n to_single_output: Whether the result is always single-output, regardless\n of the inputs (e.g. `nw.sum_horizontal`).\n """\n n_filtrations = 0\n result_expansion_kind = ExpansionKind.SINGLE\n result_has_windows = False\n result_n_orderable_ops = 0\n # result preserves length if at least one input does\n result_preserves_length = False\n # result is elementwise if all inputs are elementwise\n result_is_not_elementwise = False\n # result is scalar-like if all inputs are scalar-like\n result_is_not_scalar_like = False\n # result is literal if all inputs are literal\n result_is_not_literal = False\n\n for i, arg in enumerate(args): # noqa: PLR1702\n if (isinstance(arg, str) and not str_as_lit) or is_series(arg):\n result_preserves_length = True\n result_is_not_scalar_like = True\n result_is_not_literal = True\n elif is_expr(arg):\n metadata = arg._metadata\n if metadata.expansion_kind.is_multi_output():\n expansion_kind = metadata.expansion_kind\n if i > 0 and not allow_multi_output:\n # Left-most argument is always allowed to be multi-output.\n msg = (\n "Multi-output expressions (e.g. 
nw.col('a', 'b'), nw.all()) "\n "are not supported in this context."\n )\n raise MultiOutputExpressionError(msg)\n if not to_single_output:\n if i == 0:\n result_expansion_kind = expansion_kind\n else:\n result_expansion_kind = result_expansion_kind & expansion_kind\n\n if metadata.has_windows:\n result_has_windows = True\n result_n_orderable_ops += metadata.n_orderable_ops\n if metadata.preserves_length:\n result_preserves_length = True\n if not metadata.is_elementwise:\n result_is_not_elementwise = True\n if not metadata.is_scalar_like:\n result_is_not_scalar_like = True\n if not metadata.is_literal:\n result_is_not_literal = True\n if metadata.is_filtration:\n n_filtrations += 1\n\n if n_filtrations > 1:\n msg = "Length-changing expressions can only be used in isolation, or followed by an aggregation"\n raise LengthChangingExprError(msg)\n if result_preserves_length and n_filtrations:\n msg = "Cannot combine length-changing expressions with length-preserving ones or aggregations"\n raise ShapeError(msg)\n\n return ExprMetadata(\n result_expansion_kind,\n ExprKind.NARY,\n has_windows=result_has_windows,\n n_orderable_ops=result_n_orderable_ops,\n preserves_length=result_preserves_length,\n is_elementwise=not result_is_not_elementwise,\n is_scalar_like=not result_is_not_scalar_like,\n is_literal=not result_is_not_literal,\n )\n\n\ndef check_expressions_preserve_length(*args: IntoExpr, function_name: str) -> None:\n # Raise if any argument in `args` isn't length-preserving.\n # For Series input, we don't raise (yet), we let such checks happen later,\n # as this function works lazily and so can't evaluate lengths.\n from narwhals.series import Series\n\n if not all(\n (is_expr(x) and x._metadata.preserves_length) or isinstance(x, (str, Series))\n for x in args\n ):\n msg = f"Expressions which aggregate or change length cannot be passed to '{function_name}'."\n raise ShapeError(msg)\n\n\ndef all_exprs_are_scalar_like(*args: IntoExpr, **kwargs: IntoExpr) -> bool:\n # Raise if any argument in `args` isn't an aggregation or literal.\n # For Series input, we don't raise (yet), we let such checks happen later,\n # as this function works lazily and so can't evaluate lengths.\n exprs = chain(args, kwargs.values())\n return all(is_expr(x) and x._metadata.is_scalar_like for x in exprs)\n\n\ndef apply_n_ary_operation(\n plx: CompliantNamespaceAny,\n function: Any,\n *comparands: IntoExpr | NonNestedLiteral | _1DArray,\n str_as_lit: bool,\n) -> CompliantExprAny:\n compliant_exprs = (\n extract_compliant(plx, comparand, str_as_lit=str_as_lit)\n for comparand in comparands\n )\n kinds = [\n ExprKind.from_into_expr(comparand, str_as_lit=str_as_lit)\n for comparand in comparands\n ]\n\n broadcast = any(not kind.is_scalar_like for kind in kinds)\n compliant_exprs = (\n compliant_expr.broadcast(kind)\n if broadcast and is_compliant_expr(compliant_expr) and is_scalar_like(kind)\n else compliant_expr\n for compliant_expr, kind in zip(compliant_exprs, kinds)\n )\n return function(*compliant_exprs)\n
.venv\Lib\site-packages\narwhals\_expression_parsing.py
_expression_parsing.py
Python
21,828
0.95
0.184943
0.061185
react-lib
422
2024-12-25T18:09:53.746443
Apache-2.0
false
dda1c8aca49516eb16a0bb9c2bc62595
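# Observable consequence of the left-hand naming rule implemented by
# `combine_evaluate_output_names` / `combine_alias_output_names`: horizontal
# functions name their output after the left-most input expression. A sketch
# assuming pandas and narwhals are installed.
import pandas as pd
import narwhals as nw

df = nw.from_native(pd.DataFrame({"a": [1, 2], "b": [10, 20]}), eager_only=True)
out = df.select(nw.sum_horizontal(nw.col("a"), nw.col("b")))
print(out.columns)  # ['a'] -- the first expression's name wins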
"""Narwhals-level equivalent of `CompliantNamespace`."""\n\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Generic,\n Literal,\n Protocol,\n TypeVar,\n overload,\n)\n\nfrom narwhals._compliant.typing import CompliantNamespaceAny, CompliantNamespaceT_co\nfrom narwhals._utils import Implementation, Version\nfrom narwhals.dependencies import (\n get_cudf,\n get_modin,\n get_pandas,\n get_polars,\n get_pyarrow,\n is_dask_dataframe,\n is_duckdb_relation,\n is_ibis_table,\n is_pyspark_connect_dataframe,\n is_pyspark_dataframe,\n is_sqlframe_dataframe,\n)\n\nif TYPE_CHECKING:\n from collections.abc import Collection, Sized\n from types import ModuleType\n from typing import ClassVar\n\n import duckdb\n import pandas as pd\n import polars as pl\n import pyarrow as pa\n import pyspark.sql as pyspark_sql\n from pyspark.sql.connect.dataframe import DataFrame as PySparkConnectDataFrame\n from typing_extensions import Self, TypeAlias, TypeIs\n\n from narwhals._arrow.namespace import ArrowNamespace\n from narwhals._dask.namespace import DaskNamespace\n from narwhals._duckdb.namespace import DuckDBNamespace\n from narwhals._ibis.namespace import IbisNamespace\n from narwhals._pandas_like.namespace import PandasLikeNamespace\n from narwhals._polars.namespace import PolarsNamespace\n from narwhals._spark_like.dataframe import SQLFrameDataFrame\n from narwhals._spark_like.namespace import SparkLikeNamespace\n from narwhals.typing import DataFrameLike, NativeFrame, NativeLazyFrame, NativeSeries\n\n T = TypeVar("T")\n\n _Guard: TypeAlias = "Callable[[Any], TypeIs[T]]"\n\n _Polars: TypeAlias = Literal["polars"]\n _Arrow: TypeAlias = Literal["pyarrow"]\n _Dask: TypeAlias = Literal["dask"]\n _DuckDB: TypeAlias = Literal["duckdb"]\n _PandasLike: TypeAlias = Literal["pandas", "cudf", "modin"]\n _Ibis: TypeAlias = Literal["ibis"]\n _SparkLike: TypeAlias = Literal["pyspark", "sqlframe", "pyspark[connect]"]\n _EagerOnly: TypeAlias = "_PandasLike | _Arrow"\n _EagerAllowed: TypeAlias = "_Polars | _EagerOnly"\n _LazyOnly: TypeAlias = "_SparkLike | _Dask | _DuckDB | _Ibis"\n _LazyAllowed: TypeAlias = "_Polars | _LazyOnly"\n\n Polars: TypeAlias = Literal[_Polars, Implementation.POLARS]\n Arrow: TypeAlias = Literal[_Arrow, Implementation.PYARROW]\n Dask: TypeAlias = Literal[_Dask, Implementation.DASK]\n DuckDB: TypeAlias = Literal[_DuckDB, Implementation.DUCKDB]\n Ibis: TypeAlias = Literal[_Ibis, Implementation.IBIS]\n PandasLike: TypeAlias = Literal[\n _PandasLike, Implementation.PANDAS, Implementation.CUDF, Implementation.MODIN\n ]\n SparkLike: TypeAlias = Literal[\n _SparkLike,\n Implementation.PYSPARK,\n Implementation.SQLFRAME,\n Implementation.PYSPARK_CONNECT,\n ]\n EagerOnly: TypeAlias = "PandasLike | Arrow"\n EagerAllowed: TypeAlias = "EagerOnly | Polars"\n LazyOnly: TypeAlias = "SparkLike | Dask | DuckDB | Ibis"\n LazyAllowed: TypeAlias = "LazyOnly | Polars"\n\n BackendName: TypeAlias = "_EagerAllowed | _LazyAllowed"\n IntoBackend: TypeAlias = "BackendName | Implementation | ModuleType"\n\n EagerAllowedNamespace: TypeAlias = "Namespace[PandasLikeNamespace] | Namespace[ArrowNamespace] | Namespace[PolarsNamespace]"\n EagerAllowedImplementation: TypeAlias = Literal[\n Implementation.PANDAS,\n Implementation.CUDF,\n Implementation.MODIN,\n Implementation.PYARROW,\n Implementation.POLARS,\n ]\n\n class _BasePandasLike(Sized, Protocol):\n index: Any\n """`mypy` doesn't like the asymmetric `property` setter in `pandas`."""\n\n def __getitem__(self, key: Any, /) -> Any: 
...\n def __mul__(self, other: float | Collection[float] | Self) -> Self: ...\n def __floordiv__(self, other: float | Collection[float] | Self) -> Self: ...\n @property\n def loc(self) -> Any: ...\n @property\n def shape(self) -> tuple[int, ...]: ...\n def set_axis(self, labels: Any, *, axis: Any = ..., copy: bool = ...) -> Self: ...\n def copy(self, deep: bool = ...) -> Self: ... # noqa: FBT001\n def rename(self, *args: Any, inplace: Literal[False], **kwds: Any) -> Self:\n """`inplace=False` is required to avoid (incorrect?) default overloads."""\n ...\n\n class _BasePandasLikeFrame(NativeFrame, _BasePandasLike, Protocol): ...\n\n class _BasePandasLikeSeries(NativeSeries, _BasePandasLike, Protocol):\n def where(self, cond: Any, other: Any = ..., **kwds: Any) -> Any: ...\n\n class _NativeDask(Protocol):\n _partition_type: type[pd.DataFrame]\n\n class _CuDFDataFrame(_BasePandasLikeFrame, Protocol):\n def to_pylibcudf(self, *args: Any, **kwds: Any) -> Any: ...\n\n class _CuDFSeries(_BasePandasLikeSeries, Protocol):\n def to_pylibcudf(self, *args: Any, **kwds: Any) -> Any: ...\n\n class _NativeIbis(Protocol):\n def sql(self, *args: Any, **kwds: Any) -> Any: ...\n def __pyarrow_result__(self, *args: Any, **kwds: Any) -> Any: ...\n def __pandas_result__(self, *args: Any, **kwds: Any) -> Any: ...\n def __polars_result__(self, *args: Any, **kwds: Any) -> Any: ...\n\n class _ModinDataFrame(_BasePandasLikeFrame, Protocol):\n _pandas_class: type[pd.DataFrame]\n\n class _ModinSeries(_BasePandasLikeSeries, Protocol):\n _pandas_class: type[pd.Series[Any]]\n\n _NativePolars: TypeAlias = "pl.DataFrame | pl.LazyFrame | pl.Series"\n _NativeArrow: TypeAlias = "pa.Table | pa.ChunkedArray[Any]"\n _NativeDuckDB: TypeAlias = "duckdb.DuckDBPyRelation"\n _NativePandas: TypeAlias = "pd.DataFrame | pd.Series[Any]"\n _NativeModin: TypeAlias = "_ModinDataFrame | _ModinSeries"\n _NativeCuDF: TypeAlias = "_CuDFDataFrame | _CuDFSeries"\n _NativePandasLikeSeries: TypeAlias = "pd.Series[Any] | _CuDFSeries | _ModinSeries"\n _NativePandasLikeDataFrame: TypeAlias = (\n "pd.DataFrame | _CuDFDataFrame | _ModinDataFrame"\n )\n _NativePandasLike: TypeAlias = "_NativePandasLikeDataFrame |_NativePandasLikeSeries"\n _NativeSQLFrame: TypeAlias = "SQLFrameDataFrame"\n _NativePySpark: TypeAlias = "pyspark_sql.DataFrame"\n _NativePySparkConnect: TypeAlias = "PySparkConnectDataFrame"\n _NativeSparkLike: TypeAlias = (\n "_NativeSQLFrame | _NativePySpark | _NativePySparkConnect"\n )\n\n NativeKnown: TypeAlias = "_NativePolars | _NativeArrow | _NativePandasLike | _NativeSparkLike | _NativeDuckDB | _NativeDask | _NativeIbis"\n NativeUnknown: TypeAlias = (\n "NativeFrame | NativeSeries | NativeLazyFrame | DataFrameLike"\n )\n NativeAny: TypeAlias = "NativeKnown | NativeUnknown"\n\n__all__ = ["Namespace"]\n\n\nclass Namespace(Generic[CompliantNamespaceT_co]):\n _compliant_namespace: CompliantNamespaceT_co\n _version: ClassVar[Version] = Version.MAIN\n\n def __init__(self, namespace: CompliantNamespaceT_co, /) -> None:\n self._compliant_namespace = namespace\n\n def __init_subclass__(cls, *args: Any, version: Version, **kwds: Any) -> None:\n super().__init_subclass__(*args, **kwds)\n\n if isinstance(version, Version):\n cls._version = version\n else:\n msg = f"Expected {Version} but got {type(version).__name__!r}"\n raise TypeError(msg)\n\n def __repr__(self) -> str:\n return f"Namespace[{type(self.compliant).__name__}]"\n\n @property\n def compliant(self) -> CompliantNamespaceT_co:\n return self._compliant_namespace\n\n @property\n def 
implementation(self) -> Implementation:\n return self.compliant._implementation\n\n @property\n def version(self) -> Version:\n return self._version\n\n @overload\n @classmethod\n def from_backend(cls, backend: PandasLike, /) -> Namespace[PandasLikeNamespace]: ...\n\n @overload\n @classmethod\n def from_backend(cls, backend: Polars, /) -> Namespace[PolarsNamespace]: ...\n\n @overload\n @classmethod\n def from_backend(cls, backend: Arrow, /) -> Namespace[ArrowNamespace]: ...\n\n @overload\n @classmethod\n def from_backend(cls, backend: SparkLike, /) -> Namespace[SparkLikeNamespace]: ...\n\n @overload\n @classmethod\n def from_backend(cls, backend: DuckDB, /) -> Namespace[DuckDBNamespace]: ...\n\n @overload\n @classmethod\n def from_backend(cls, backend: Dask, /) -> Namespace[DaskNamespace]: ...\n\n @overload\n @classmethod\n def from_backend(cls, backend: Ibis, /) -> Namespace[IbisNamespace]: ...\n\n @overload\n @classmethod\n def from_backend(cls, backend: EagerAllowed, /) -> EagerAllowedNamespace: ...\n\n @overload\n @classmethod\n def from_backend(\n cls, backend: IntoBackend, /\n ) -> Namespace[CompliantNamespaceAny]: ...\n\n @classmethod\n def from_backend(\n cls: type[Namespace[Any]], backend: IntoBackend, /\n ) -> Namespace[Any]:\n """Instantiate from native namespace module, string, or Implementation.\n\n Arguments:\n backend: native namespace module, string, or Implementation.\n\n Returns:\n Namespace.\n\n Examples:\n >>> from narwhals._namespace import Namespace\n >>> Namespace.from_backend("polars")\n Namespace[PolarsNamespace]\n """\n impl = Implementation.from_backend(backend)\n backend_version = impl._backend_version()\n version = cls._version\n ns: CompliantNamespaceAny\n if impl.is_pandas_like():\n from narwhals._pandas_like.namespace import PandasLikeNamespace\n\n ns = PandasLikeNamespace(\n implementation=impl, backend_version=backend_version, version=version\n )\n\n elif impl.is_polars():\n from narwhals._polars.namespace import PolarsNamespace\n\n ns = PolarsNamespace(backend_version=backend_version, version=version)\n elif impl.is_pyarrow():\n from narwhals._arrow.namespace import ArrowNamespace\n\n ns = ArrowNamespace(backend_version=backend_version, version=version)\n elif impl.is_spark_like():\n from narwhals._spark_like.namespace import SparkLikeNamespace\n\n ns = SparkLikeNamespace(\n implementation=impl, backend_version=backend_version, version=version\n )\n elif impl.is_duckdb():\n from narwhals._duckdb.namespace import DuckDBNamespace\n\n ns = DuckDBNamespace(backend_version=backend_version, version=version)\n elif impl.is_dask():\n from narwhals._dask.namespace import DaskNamespace\n\n ns = DaskNamespace(backend_version=backend_version, version=version)\n elif impl.is_ibis():\n from narwhals._ibis.namespace import IbisNamespace\n\n ns = IbisNamespace(backend_version=backend_version, version=version)\n else:\n msg = "Not supported Implementation" # pragma: no cover\n raise AssertionError(msg)\n return cls(ns)\n\n @overload\n @classmethod\n def from_native_object(\n cls, native: _NativePolars, /\n ) -> Namespace[PolarsNamespace]: ...\n\n @overload\n @classmethod\n def from_native_object(\n cls, native: _NativePandas, /\n ) -> Namespace[PandasLikeNamespace[pd.DataFrame, pd.Series[Any]]]: ...\n\n @overload\n @classmethod\n def from_native_object(cls, native: _NativeArrow, /) -> Namespace[ArrowNamespace]: ...\n\n @overload\n @classmethod\n def from_native_object(\n cls, native: _NativeSparkLike, /\n ) -> Namespace[SparkLikeNamespace]: ...\n\n @overload\n 
@classmethod\n def from_native_object(\n cls, native: _NativeDuckDB, /\n ) -> Namespace[DuckDBNamespace]: ...\n\n @overload\n @classmethod\n def from_native_object(cls, native: _NativeDask, /) -> Namespace[DaskNamespace]: ...\n\n @overload\n @classmethod\n def from_native_object(cls, native: _NativeIbis, /) -> Namespace[IbisNamespace]: ...\n\n @overload\n @classmethod\n def from_native_object(\n cls, native: _NativeModin, /\n ) -> Namespace[PandasLikeNamespace[_ModinDataFrame, _ModinSeries]]: ...\n\n @overload\n @classmethod\n def from_native_object(\n cls, native: _NativeCuDF, /\n ) -> Namespace[PandasLikeNamespace[_CuDFDataFrame, _CuDFSeries]]: ...\n\n @overload\n @classmethod\n def from_native_object(\n cls, native: _NativePandasLike, /\n ) -> Namespace[PandasLikeNamespace[Any, Any]]: ...\n\n @overload\n @classmethod\n def from_native_object(\n cls, native: NativeUnknown, /\n ) -> Namespace[CompliantNamespaceAny]: ...\n\n @classmethod\n def from_native_object( # noqa: PLR0911\n cls: type[Namespace[Any]], native: NativeAny, /\n ) -> Namespace[Any]:\n if is_native_polars(native):\n return cls.from_backend(Implementation.POLARS)\n elif is_native_pandas(native):\n return cls.from_backend(Implementation.PANDAS)\n elif is_native_arrow(native):\n return cls.from_backend(Implementation.PYARROW)\n elif is_native_spark_like(native):\n return cls.from_backend(\n Implementation.SQLFRAME\n if is_native_sqlframe(native)\n else Implementation.PYSPARK_CONNECT\n if is_native_pyspark_connect(native)\n else Implementation.PYSPARK\n )\n elif is_native_dask(native):\n return cls.from_backend(Implementation.DASK) # pragma: no cover\n elif is_native_duckdb(native):\n return cls.from_backend(Implementation.DUCKDB)\n elif is_native_cudf(native): # pragma: no cover\n return cls.from_backend(Implementation.CUDF)\n elif is_native_modin(native): # pragma: no cover\n return cls.from_backend(Implementation.MODIN)\n elif is_native_ibis(native):\n return cls.from_backend(Implementation.IBIS)\n else:\n msg = f"Unsupported type: {type(native).__qualname__!r}"\n raise TypeError(msg)\n\n\ndef is_native_polars(obj: Any) -> TypeIs[_NativePolars]:\n return (pl := get_polars()) is not None and isinstance(\n obj, (pl.DataFrame, pl.Series, pl.LazyFrame)\n )\n\n\ndef is_native_arrow(obj: Any) -> TypeIs[_NativeArrow]:\n return (pa := get_pyarrow()) is not None and isinstance(\n obj, (pa.Table, pa.ChunkedArray)\n )\n\n\ndef is_native_dask(obj: Any) -> TypeIs[_NativeDask]:\n return is_dask_dataframe(obj)\n\n\nis_native_duckdb: _Guard[_NativeDuckDB] = is_duckdb_relation\nis_native_sqlframe: _Guard[_NativeSQLFrame] = is_sqlframe_dataframe\nis_native_pyspark: _Guard[_NativePySpark] = is_pyspark_dataframe\nis_native_pyspark_connect: _Guard[_NativePySparkConnect] = is_pyspark_connect_dataframe\n\n\ndef is_native_pandas(obj: Any) -> TypeIs[_NativePandas]:\n return (pd := get_pandas()) is not None and isinstance(obj, (pd.DataFrame, pd.Series))\n\n\ndef is_native_modin(obj: Any) -> TypeIs[_NativeModin]:\n return (mpd := get_modin()) is not None and isinstance(\n obj, (mpd.DataFrame, mpd.Series)\n ) # pragma: no cover\n\n\ndef is_native_cudf(obj: Any) -> TypeIs[_NativeCuDF]:\n return (cudf := get_cudf()) is not None and isinstance(\n obj, (cudf.DataFrame, cudf.Series)\n ) # pragma: no cover\n\n\ndef is_native_pandas_like(obj: Any) -> TypeIs[_NativePandasLike]:\n return (\n is_native_pandas(obj) or is_native_cudf(obj) or is_native_modin(obj)\n ) # pragma: no cover\n\n\ndef is_native_spark_like(obj: Any) -> TypeIs[_NativeSparkLike]:\n return 
(\n is_native_sqlframe(obj)\n or is_native_pyspark(obj)\n or is_native_pyspark_connect(obj)\n )\n\n\ndef is_native_ibis(obj: Any) -> TypeIs[_NativeIbis]:\n return is_ibis_table(obj)\n
.venv\Lib\site-packages\narwhals\_namespace.py
_namespace.py
Python
15,711
0.95
0.152466
0
awesome-app
578
2023-12-26T01:21:46.005101
BSD-3-Clause
false
1f91ef46898b6744763b0bb273303cf8
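# Sketch of backend detection with the private `Namespace` helper above (an
# internal API, so subject to change): resolving by backend name and by native
# object lands on the same compliant namespace wrapper.
import polars as pl

from narwhals._namespace import Namespace

print(Namespace.from_backend("polars"))  # Namespace[PolarsNamespace]
print(Namespace.from_native_object(pl.DataFrame({"a": [1]})))  # Namespace[PolarsNamespace]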
"""[Protocols] defining conversion methods between representations.\n\nThese come in 3 flavors and are [generic] to promote reuse.\n\nThe following examples use the placeholder types `Narwhal` and `Other`:\n- `Narwhal`: some class written in `narwhals`.\n- `Other`: any other class, could be native, compliant, or a builtin.\n\n## `To<Other>`\nWhen we want to convert or unwrap a `Narwhal` into an `Other`,\nwe provide an **instance** method:\n\n ToOtherT_co = TypeVar("ToOtherT_co", covariant=True)\n\n class ToOther(Protocol[ToOtherT_co]):\n def to_other(self, *args: Any, **kwds: Any) -> ToOtherT_co: ...\n\n- `*args`, `**kwds` are defined to be *permissive* and allow a wider set of signatures when implementing.\n - In most cases, they are unused.\n - But come in handy when adapting an [upstream signature].\n- We use a **covariant** `TypeVar`.\n\n## `From<Other>`\nBut what if we have `Other` and want to do the reverse?\n\nOur `Narwhal` will need to provide a `@classmethod`:\n\n FromOtherT_contra = TypeVar("FromOtherT_contra", contravariant=True)\n\n class FromOther(Protocol[FromOtherT_contra]):\n @classmethod\n def from_other(cls, data: FromOtherT_contra, *args: Any, **kwds: Any) -> Self: ...\n\n- `*args`, `**kwds` serve a similar purpose as before, but are much more frequently used.\n- We've added a **required** [positional-only] parameter `data` which will always be passed `Other`.\n - This removes the name from the contract of the protocol.\n - Implementations are free to use something more descriptive for documentation purposes.\n- We use a **contravariant** `TypeVar`.\n\n## `<Other>Convertible`\nCombining our `to_` and `from_` methods allows us to convert in both directions `Narwhal` <-> `Other`:\n\n class OtherConvertible(\n ToOther[ToOtherT_co],\n FromOther[FromOtherT_contra],\n Protocol[ToOtherT_co, FromOtherT_contra],\n ): ...\n\n## See Also\nVariance of `TypeVar`(s) can be tricky to wrap your head around.\n\nTo learn more see [moist], [dry], or [even drier] - depending on how deep you wanna go.\n\n[Protocols]: https://typing.python.org/en/latest/spec/protocol.html\n[generic]: https://typing.python.org/en/latest/spec/generics.html\n[upstream signature]: https://numpy.org/doc/stable/user/basics.interoperability.html#the-array-method\n[positional-only]: https://peps.python.org/pep-0570/\n[moist]: https://mypy.readthedocs.io/en/stable/generics.html#variance-of-generic-types\n[dry]: https://typing.python.org/en/latest/spec/generics.html#variance\n[even drier]: https://en.wikipedia.org/wiki/Covariance_and_contravariance_%28computer_science%29\n"""\n\nfrom __future__ import annotations\n\nfrom collections.abc import Iterable, Mapping\nfrom typing import TYPE_CHECKING, Any, Protocol\n\nfrom narwhals._typing_compat import TypeVar\n\nif TYPE_CHECKING:\n import pyarrow as pa\n from typing_extensions import Self, TypeAlias, TypeIs\n\n\nclass ArrowStreamExportable(Protocol):\n def __arrow_c_stream__(self, requested_schema: object | None = None) -> object: ...\n\n\nToNumpyT_co = TypeVar("ToNumpyT_co", covariant=True)\nFromNumpyDT_contra = TypeVar(\n "FromNumpyDT_contra", contravariant=True, default=ToNumpyT_co\n)\nFromNumpyT_contra = TypeVar("FromNumpyT_contra", contravariant=True)\n\n\nclass ToNumpy(Protocol[ToNumpyT_co]):\n def to_numpy(self, *args: Any, **kwds: Any) -> ToNumpyT_co: ...\n\n\nclass FromNumpy(Protocol[FromNumpyT_contra]):\n @classmethod\n def from_numpy(cls, data: FromNumpyT_contra, *args: Any, **kwds: Any) -> Self: ...\n\n\nclass NumpyConvertible(\n ToNumpy[ToNumpyT_co],\n 
FromNumpy[FromNumpyDT_contra],\n Protocol[ToNumpyT_co, FromNumpyDT_contra],\n):\n def to_numpy(self, dtype: Any, *, copy: bool | None) -> ToNumpyT_co: ...\n\n\nFromIterableT_contra = TypeVar("FromIterableT_contra", contravariant=True, default=Any)\n\n\nclass FromIterable(Protocol[FromIterableT_contra]):\n @classmethod\n def from_iterable(\n cls, data: Iterable[FromIterableT_contra], *args: Any, **kwds: Any\n ) -> Self: ...\n\n\nToDictDT_co = TypeVar(\n "ToDictDT_co", bound=Mapping[str, Any], covariant=True, default="dict[str, Any]"\n)\nFromDictDT_contra = TypeVar(\n "FromDictDT_contra",\n bound=Mapping[str, Any],\n contravariant=True,\n default=Mapping[str, Any],\n)\n\n\nclass ToDict(Protocol[ToDictDT_co]):\n def to_dict(self, *args: Any, **kwds: Any) -> ToDictDT_co: ...\n\n\nclass FromDict(Protocol[FromDictDT_contra]):\n @classmethod\n def from_dict(cls, data: FromDictDT_contra, *args: Any, **kwds: Any) -> Self: ...\n\n\nclass DictConvertible(\n ToDict[ToDictDT_co],\n FromDict[FromDictDT_contra],\n Protocol[ToDictDT_co, FromDictDT_contra],\n): ...\n\n\nIntoArrowTable: TypeAlias = "ArrowStreamExportable | pa.Table"\n"""An object supporting the [Arrow PyCapsule Interface], or a native [`pyarrow.Table`].\n\n[Arrow PyCapsule Interface]: https://arrow.apache.org/docs/format/CDataInterface/PyCapsuleInterface.html#arrowstream-export\n[`pyarrow.Table`]: https://arrow.apache.org/docs/python/generated/pyarrow.Table.html\n"""\nToArrowT_co = TypeVar("ToArrowT_co", covariant=True)\nFromArrowDT_contra = TypeVar(\n "FromArrowDT_contra", contravariant=True, default=IntoArrowTable\n)\n\n\nclass ToArrow(Protocol[ToArrowT_co]):\n def to_arrow(self, *args: Any, **kwds: Any) -> ToArrowT_co: ...\n\n\nclass FromArrow(Protocol[FromArrowDT_contra]):\n @classmethod\n def from_arrow(cls, data: FromArrowDT_contra, *args: Any, **kwds: Any) -> Self: ...\n\n\nclass ArrowConvertible(\n ToArrow[ToArrowT_co],\n FromArrow[FromArrowDT_contra],\n Protocol[ToArrowT_co, FromArrowDT_contra],\n): ...\n\n\nFromNativeT = TypeVar("FromNativeT")\n\n\nclass FromNative(Protocol[FromNativeT]):\n @classmethod\n def from_native(cls, data: FromNativeT, *args: Any, **kwds: Any) -> Self: ...\n @staticmethod\n def _is_native(obj: FromNativeT | Any, /) -> TypeIs[FromNativeT]:\n """Return `True` if `obj` can be passed to `from_native`."""\n ...\n\n\nToNarwhalsT_co = TypeVar("ToNarwhalsT_co", covariant=True)\n\n\nclass ToNarwhals(Protocol[ToNarwhalsT_co]):\n def to_narwhals(self) -> ToNarwhalsT_co:\n """Convert into public representation."""\n ...\n
.venv\Lib\site-packages\narwhals\_translate.py
_translate.py
Python
6,112
0.95
0.193548
0.031496
vue-tools
833
2023-11-22T04:49:07.941679
Apache-2.0
false
5b520a66dac0ff54a1440185a11f24e0
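# A toy class (illustrative only) that structurally satisfies the
# `NumpyConvertible` contract described in the docstring: an instance method
# `to_numpy` plus a `from_numpy` classmethod. Protocols are matched by shape,
# so no inheritance from the protocol is required.
from typing import Any

import numpy as np


class Boxed:
    def __init__(self, values: list[float]) -> None:
        self._values = values

    def to_numpy(self, dtype: Any, *, copy: bool | None) -> "np.ndarray[Any, Any]":
        return np.asarray(self._values, dtype=dtype)

    @classmethod
    def from_numpy(cls, data: "np.ndarray[Any, Any]", *args: Any, **kwds: Any) -> "Boxed":
        return cls(data.tolist())


box = Boxed.from_numpy(np.array([1.0, 2.0]))
print(box.to_numpy(None, copy=None))  # [1. 2.]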
"""Backward compatibility for newer/less buggy typing features.\n\n## Important\nImport from here to avoid introducing a runtime dependency on [`typing_extensions`]\n\n## Notes\n- `Protocol38`\n - https://github.com/narwhals-dev/narwhals/pull/2064#discussion_r1965921386\n - https://github.com/narwhals-dev/narwhals/pull/2294#discussion_r2014534830\n- `TypeVar` defaults\n - https://typing.python.org/en/latest/spec/generics.html#type-parameter-defaults\n - https://peps.python.org/pep-0696/\n- `@deprecated`\n - https://docs.python.org/3/library/warnings.html#warnings.deprecated\n - https://typing.python.org/en/latest/spec/directives.html#deprecated\n - https://peps.python.org/pep-0702/\n\n[`typing_extensions`]: https://github.com/python/typing_extensions\n"""\n\nfrom __future__ import annotations\n\n# ruff: noqa: ARG001, ANN202, N802\nimport sys\nfrom typing import TYPE_CHECKING, Any\n\nif TYPE_CHECKING:\n from typing import Callable, Protocol as Protocol38\n\n if sys.version_info >= (3, 13):\n from typing import TypeVar\n from warnings import deprecated\n else:\n from typing_extensions import TypeVar, deprecated\n\n if sys.version_info >= (3, 11):\n from typing import Never, assert_never\n else:\n from typing_extensions import Never, assert_never\n\n _Fn = TypeVar("_Fn", bound=Callable[..., Any])\n\n\nelse: # pragma: no cover\n if sys.version_info >= (3, 13):\n from typing import TypeVar\n from warnings import deprecated\n else:\n from typing import TypeVar as _TypeVar\n\n def TypeVar(\n name: str,\n *constraints: Any,\n bound: Any | None = None,\n covariant: bool = False,\n contravariant: bool = False,\n **kwds: Any,\n ):\n return _TypeVar(\n name,\n *constraints,\n bound=bound,\n covariant=covariant,\n contravariant=contravariant,\n )\n\n def deprecated(message: str, /) -> Callable[[_Fn], _Fn]:\n def wrapper(func: _Fn, /) -> _Fn:\n return func\n\n return wrapper\n\n _ASSERT_NEVER_REPR_MAX_LENGTH = 100\n _BUG_URL = (\n "https://github.com/narwhals-dev/narwhals/issues/new?template=bug_report.yml"\n )\n\n def assert_never(arg: Never, /) -> Never:\n value = repr(arg)\n if len(value) > _ASSERT_NEVER_REPR_MAX_LENGTH:\n value = value[:_ASSERT_NEVER_REPR_MAX_LENGTH] + "..."\n msg = (\n f"Expected code to be unreachable, but got: {value}.\n"\n f"Please report an issue at {_BUG_URL}"\n )\n raise AssertionError(msg)\n\n # TODO @dangotbanned: Remove after dropping `3.8` (#2084)\n # - https://github.com/narwhals-dev/narwhals/pull/2064#discussion_r1965921386\n from typing import Protocol as Protocol38\n\n\n__all__ = ["Protocol38", "TypeVar", "assert_never", "deprecated"]\n
.venv\Lib\site-packages\narwhals\_typing_compat.py
_typing_compat.py
Python
2,916
0.95
0.107527
0.108108
node-utils
569
2024-07-03T21:11:42.449414
Apache-2.0
false
3d21e8f1c791c60783414551da09d548
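# Hedged sketch of the shim in action: importing `TypeVar` from this module
# gives PEP 696 `default=` support on Python 3.13+, while the runtime fallback
# above silently discards the keyword on older interpreters, so the same code
# runs everywhere.
from typing import Generic

from narwhals._typing_compat import TypeVar

T = TypeVar("T", default=int)


class Holder(Generic[T]):
    def __init__(self, value: T) -> None:
        self.value = value


# Under a type checker with defaults support, bare `Holder` means `Holder[int]`.
print(Holder(1).value)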
from __future__ import annotations\n\nimport os\nimport re\nfrom collections.abc import Collection, Container, Iterable, Iterator, Mapping, Sequence\nfrom datetime import timezone\nfrom enum import Enum, auto\nfrom functools import lru_cache, wraps\nfrom importlib.util import find_spec\nfrom inspect import getattr_static, getdoc\nfrom secrets import token_hex\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Generic,\n Literal,\n Protocol,\n TypeVar,\n Union,\n cast,\n overload,\n)\nfrom warnings import warn\n\nfrom narwhals._enum import NoAutoEnum\nfrom narwhals._typing_compat import deprecated\nfrom narwhals.dependencies import (\n get_cudf,\n get_dask_dataframe,\n get_duckdb,\n get_ibis,\n get_modin,\n get_pandas,\n get_polars,\n get_pyarrow,\n get_pyspark_connect,\n get_pyspark_sql,\n get_sqlframe,\n is_narwhals_series,\n is_narwhals_series_int,\n is_numpy_array_1d,\n is_numpy_array_1d_int,\n is_pandas_like_dataframe,\n is_pandas_like_series,\n is_polars_series,\n is_pyarrow_chunked_array,\n)\nfrom narwhals.exceptions import ColumnNotFoundError, DuplicateError, InvalidOperationError\n\nif TYPE_CHECKING:\n from collections.abc import Set # noqa: PYI025\n from types import ModuleType\n\n import pandas as pd\n import polars as pl\n import pyarrow as pa\n from typing_extensions import (\n Concatenate,\n LiteralString,\n ParamSpec,\n Self,\n TypeAlias,\n TypeIs,\n )\n\n from narwhals._compliant import (\n CompliantExpr,\n CompliantExprT,\n CompliantFrameT,\n CompliantSeriesOrNativeExprT_co,\n CompliantSeriesT,\n NativeFrameT_co,\n NativeSeriesT_co,\n )\n from narwhals._compliant.typing import EvalNames\n from narwhals._namespace import EagerAllowedImplementation, Namespace\n from narwhals._translate import ArrowStreamExportable, IntoArrowTable, ToNarwhalsT_co\n from narwhals.dataframe import DataFrame, LazyFrame\n from narwhals.dtypes import DType\n from narwhals.series import Series\n from narwhals.typing import (\n CompliantDataFrame,\n CompliantLazyFrame,\n CompliantSeries,\n DataFrameLike,\n DTypes,\n IntoSeriesT,\n MultiIndexSelector,\n SingleIndexSelector,\n SizedMultiIndexSelector,\n SizeUnit,\n SupportsNativeNamespace,\n TimeUnit,\n _1DArray,\n _SliceIndex,\n _SliceName,\n _SliceNone,\n )\n\n FrameOrSeriesT = TypeVar(\n "FrameOrSeriesT", bound=Union[LazyFrame[Any], DataFrame[Any], Series[Any]]\n )\n\n _T1 = TypeVar("_T1")\n _T2 = TypeVar("_T2")\n _T3 = TypeVar("_T3")\n _T4 = TypeVar("_T4")\n _T5 = TypeVar("_T5")\n _T6 = TypeVar("_T6")\n _T7 = TypeVar("_T7")\n _Fn = TypeVar("_Fn", bound="Callable[..., Any]")\n P = ParamSpec("P")\n R = TypeVar("R")\n R1 = TypeVar("R1")\n R2 = TypeVar("R2")\n\n class _SupportsVersion(Protocol):\n __version__: str\n\n class _SupportsGet(Protocol): # noqa: PYI046\n def __get__(self, instance: Any, owner: Any | None = None, /) -> Any: ...\n\n class _StoresImplementation(Protocol):\n _implementation: Implementation\n """Implementation of native object (pandas, Polars, PyArrow, ...)."""\n\n class _StoresBackendVersion(Protocol):\n _backend_version: tuple[int, ...]\n """Version tuple for a native package."""\n\n class _StoresVersion(Protocol):\n _version: Version\n """Narwhals API version (V1 or MAIN)."""\n\n class _LimitedContext(_StoresBackendVersion, _StoresVersion, Protocol):\n """Provides 2 attributes.\n\n - `_backend_version`\n - `_version`\n """\n\n class _FullContext(_StoresImplementation, _LimitedContext, Protocol):\n """Provides 3 attributes.\n\n - `_implementation`\n - `_backend_version`\n - `_version`\n """\n\n class 
_StoresColumns(Protocol):\n @property\n def columns(self) -> Sequence[str]: ...\n\n\n_T = TypeVar("_T")\nNativeT_co = TypeVar("NativeT_co", covariant=True)\nCompliantT_co = TypeVar("CompliantT_co", covariant=True)\n_ContextT = TypeVar("_ContextT", bound="_FullContext")\n_Method: TypeAlias = "Callable[Concatenate[_ContextT, P], R]"\n_Constructor: TypeAlias = "Callable[Concatenate[_T, P], R2]"\n\n\nclass _StoresNative(Protocol[NativeT_co]): # noqa: PYI046\n """Provides access to a native object.\n\n Native objects have types like:\n\n >>> from pandas import Series\n >>> from pyarrow import Table\n """\n\n @property\n def native(self) -> NativeT_co:\n """Return the native object."""\n ...\n\n\nclass _StoresCompliant(Protocol[CompliantT_co]): # noqa: PYI046\n """Provides access to a compliant object.\n\n Compliant objects have types like:\n\n >>> from narwhals._pandas_like.series import PandasLikeSeries\n >>> from narwhals._arrow.dataframe import ArrowDataFrame\n """\n\n @property\n def compliant(self) -> CompliantT_co:\n """Return the compliant object."""\n ...\n\n\nclass Version(Enum):\n V1 = auto()\n MAIN = auto()\n\n @property\n def namespace(self) -> type[Namespace[Any]]:\n if self is Version.MAIN:\n from narwhals._namespace import Namespace\n\n return Namespace\n from narwhals.stable.v1._namespace import Namespace\n\n return Namespace\n\n @property\n def dtypes(self) -> DTypes:\n if self is Version.MAIN:\n from narwhals import dtypes\n\n return dtypes\n from narwhals.stable.v1 import dtypes as v1_dtypes\n\n return v1_dtypes\n\n @property\n def dataframe(self) -> type[DataFrame[Any]]:\n if self is Version.MAIN:\n from narwhals.dataframe import DataFrame\n\n return DataFrame\n from narwhals.stable.v1 import DataFrame as DataFrameV1\n\n return DataFrameV1\n\n @property\n def lazyframe(self) -> type[LazyFrame[Any]]:\n if self is Version.MAIN:\n from narwhals.dataframe import LazyFrame\n\n return LazyFrame\n from narwhals.stable.v1 import LazyFrame as LazyFrameV1\n\n return LazyFrameV1\n\n @property\n def series(self) -> type[Series[Any]]:\n if self is Version.MAIN:\n from narwhals.series import Series\n\n return Series\n from narwhals.stable.v1 import Series as SeriesV1\n\n return SeriesV1\n\n\nclass Implementation(NoAutoEnum):\n """Implementation of native object (pandas, Polars, PyArrow, ...)."""\n\n PANDAS = "pandas"\n """pandas implementation."""\n MODIN = "modin"\n """Modin implementation."""\n CUDF = "cudf"\n """cuDF implementation."""\n PYARROW = "pyarrow"\n """PyArrow implementation."""\n PYSPARK = "pyspark"\n """PySpark implementation."""\n POLARS = "polars"\n """Polars implementation."""\n DASK = "dask"\n """Dask implementation."""\n DUCKDB = "duckdb"\n """DuckDB implementation."""\n IBIS = "ibis"\n """Ibis implementation."""\n SQLFRAME = "sqlframe"\n """SQLFrame implementation."""\n PYSPARK_CONNECT = "pyspark[connect]"\n """PySpark Connect implementation."""\n UNKNOWN = "unknown"\n """Unknown implementation."""\n\n def __str__(self) -> str:\n return str(self.value)\n\n @classmethod\n def from_native_namespace(\n cls: type[Self], native_namespace: ModuleType\n ) -> Implementation: # pragma: no cover\n """Instantiate Implementation object from a native namespace module.\n\n Arguments:\n native_namespace: Native namespace.\n\n Returns:\n Implementation.\n """\n mapping = {\n get_pandas(): Implementation.PANDAS,\n get_modin(): Implementation.MODIN,\n get_cudf(): Implementation.CUDF,\n get_pyarrow(): Implementation.PYARROW,\n get_pyspark_sql(): Implementation.PYSPARK,\n get_polars(): 
Implementation.POLARS,\n get_dask_dataframe(): Implementation.DASK,\n get_duckdb(): Implementation.DUCKDB,\n get_ibis(): Implementation.IBIS,\n get_sqlframe(): Implementation.SQLFRAME,\n get_pyspark_connect(): Implementation.PYSPARK_CONNECT,\n }\n return mapping.get(native_namespace, Implementation.UNKNOWN)\n\n @classmethod\n def from_string(\n cls: type[Self], backend_name: str\n ) -> Implementation: # pragma: no cover\n """Instantiate Implementation object from a native namespace module.\n\n Arguments:\n backend_name: Name of backend, expressed as string.\n\n Returns:\n Implementation.\n """\n try:\n return cls(backend_name)\n except ValueError:\n return Implementation.UNKNOWN\n\n @classmethod\n def from_backend(\n cls: type[Self], backend: str | Implementation | ModuleType\n ) -> Implementation:\n """Instantiate from native namespace module, string, or Implementation.\n\n Arguments:\n backend: Backend to instantiate Implementation from.\n\n Returns:\n Implementation.\n """\n return (\n cls.from_string(backend)\n if isinstance(backend, str)\n else backend\n if isinstance(backend, Implementation)\n else cls.from_native_namespace(backend)\n )\n\n def to_native_namespace(self) -> ModuleType:\n """Return the native namespace module corresponding to Implementation.\n\n Returns:\n Native module.\n """\n if self is Implementation.UNKNOWN:\n msg = "Cannot return native namespace from UNKNOWN Implementation"\n raise AssertionError(msg)\n\n validate_backend_version(self, self._backend_version())\n\n module_name = _IMPLEMENTATION_TO_MODULE_NAME.get(self, self.value)\n return _import_native_namespace(module_name)\n\n def is_pandas(self) -> bool:\n """Return whether implementation is pandas.\n\n Returns:\n Boolean.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 3]})\n >>> df = nw.from_native(df_native)\n >>> df.implementation.is_pandas()\n True\n """\n return self is Implementation.PANDAS\n\n def is_pandas_like(self) -> bool:\n """Return whether implementation is pandas, Modin, or cuDF.\n\n Returns:\n Boolean.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 3]})\n >>> df = nw.from_native(df_native)\n >>> df.implementation.is_pandas_like()\n True\n """\n return self in {Implementation.PANDAS, Implementation.MODIN, Implementation.CUDF}\n\n def is_spark_like(self) -> bool:\n """Return whether implementation is pyspark or sqlframe.\n\n Returns:\n Boolean.\n\n Examples:\n >>> import pandas as pd\n >>> import narwhals as nw\n >>> df_native = pd.DataFrame({"a": [1, 2, 3]})\n >>> df = nw.from_native(df_native)\n >>> df.implementation.is_spark_like()\n False\n """\n return self in {\n Implementation.PYSPARK,\n Implementation.SQLFRAME,\n Implementation.PYSPARK_CONNECT,\n }\n\n def is_polars(self) -> bool:\n """Return whether implementation is Polars.\n\n Returns:\n Boolean.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"a": [1, 2, 3]})\n >>> df = nw.from_native(df_native)\n >>> df.implementation.is_polars()\n True\n """\n return self is Implementation.POLARS\n\n def is_cudf(self) -> bool:\n """Return whether implementation is cuDF.\n\n Returns:\n Boolean.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"a": [1, 2, 3]})\n >>> df = nw.from_native(df_native)\n >>> df.implementation.is_cudf()\n False\n """\n return self is Implementation.CUDF # pragma: no cover\n\n def is_modin(self) 
-> bool:\n """Return whether implementation is Modin.\n\n Returns:\n Boolean.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"a": [1, 2, 3]})\n >>> df = nw.from_native(df_native)\n >>> df.implementation.is_modin()\n False\n """\n return self is Implementation.MODIN # pragma: no cover\n\n def is_pyspark(self) -> bool:\n """Return whether implementation is PySpark.\n\n Returns:\n Boolean.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"a": [1, 2, 3]})\n >>> df = nw.from_native(df_native)\n >>> df.implementation.is_pyspark()\n False\n """\n return self is Implementation.PYSPARK # pragma: no cover\n\n def is_pyspark_connect(self) -> bool:\n """Return whether implementation is PySpark Connect.\n\n Returns:\n Boolean.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"a": [1, 2, 3]})\n >>> df = nw.from_native(df_native)\n >>> df.implementation.is_pyspark_connect()\n False\n """\n return self is Implementation.PYSPARK_CONNECT # pragma: no cover\n\n def is_pyarrow(self) -> bool:\n """Return whether implementation is PyArrow.\n\n Returns:\n Boolean.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"a": [1, 2, 3]})\n >>> df = nw.from_native(df_native)\n >>> df.implementation.is_pyarrow()\n False\n """\n return self is Implementation.PYARROW # pragma: no cover\n\n def is_dask(self) -> bool:\n """Return whether implementation is Dask.\n\n Returns:\n Boolean.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"a": [1, 2, 3]})\n >>> df = nw.from_native(df_native)\n >>> df.implementation.is_dask()\n False\n """\n return self is Implementation.DASK # pragma: no cover\n\n def is_duckdb(self) -> bool:\n """Return whether implementation is DuckDB.\n\n Returns:\n Boolean.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"a": [1, 2, 3]})\n >>> df = nw.from_native(df_native)\n >>> df.implementation.is_duckdb()\n False\n """\n return self is Implementation.DUCKDB # pragma: no cover\n\n def is_ibis(self) -> bool:\n """Return whether implementation is Ibis.\n\n Returns:\n Boolean.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"a": [1, 2, 3]})\n >>> df = nw.from_native(df_native)\n >>> df.implementation.is_ibis()\n False\n """\n return self is Implementation.IBIS # pragma: no cover\n\n def is_sqlframe(self) -> bool:\n """Return whether implementation is SQLFrame.\n\n Returns:\n Boolean.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_native = pl.DataFrame({"a": [1, 2, 3]})\n >>> df = nw.from_native(df_native)\n >>> df.implementation.is_sqlframe()\n False\n """\n return self is Implementation.SQLFRAME # pragma: no cover\n\n def _backend_version(self) -> tuple[int, ...]:\n """Returns backend version.\n\n As a byproduct of loading the native namespace, we also store it as an attribute\n under the `_native_namespace` name.\n """\n if self is Implementation.UNKNOWN: # pragma: no cover\n msg = "Cannot return backend version from UNKNOWN Implementation"\n raise AssertionError(msg)\n\n module_name = _IMPLEMENTATION_TO_MODULE_NAME.get(self, self.value)\n native_namespace = _import_native_namespace(module_name)\n\n into_version: ModuleType | str\n if self.is_sqlframe():\n import sqlframe._version\n\n into_version = sqlframe._version\n elif 
self.is_pyspark() or self.is_pyspark_connect(): # pragma: no cover\n import pyspark # ignore-banned-import\n\n into_version = pyspark\n elif self.is_dask():\n import dask # ignore-banned-import\n\n into_version = dask\n else:\n into_version = native_namespace\n\n return parse_version(version=into_version)\n\n\nMIN_VERSIONS: Mapping[Implementation, tuple[int, ...]] = {\n Implementation.PANDAS: (1, 1, 3),\n Implementation.MODIN: (0, 8, 2),\n Implementation.CUDF: (24, 10),\n Implementation.PYARROW: (11,),\n Implementation.PYSPARK: (3, 5),\n Implementation.PYSPARK_CONNECT: (3, 5),\n Implementation.POLARS: (0, 20, 4),\n Implementation.DASK: (2024, 8),\n Implementation.DUCKDB: (1,),\n Implementation.IBIS: (6,),\n Implementation.SQLFRAME: (3, 22, 0),\n}\n\n_IMPLEMENTATION_TO_MODULE_NAME: Mapping[Implementation, str] = {\n Implementation.DASK: "dask.dataframe",\n Implementation.MODIN: "modin.pandas",\n Implementation.PYSPARK: "pyspark.sql",\n Implementation.PYSPARK_CONNECT: "pyspark.sql.connect",\n}\n"""Stores non default mapping from Implementation to module name"""\n\n\ndef validate_backend_version(\n implementation: Implementation, backend_version: tuple[int, ...]\n) -> None:\n if backend_version < (min_version := MIN_VERSIONS[implementation]):\n msg = f"Minimum version of {implementation} supported by Narwhals is {min_version}, found: {backend_version}"\n raise ValueError(msg)\n\n\n@lru_cache(maxsize=16)\ndef _import_native_namespace(module_name: str) -> ModuleType:\n from importlib import import_module\n\n return import_module(module_name)\n\n\ndef flatten(args: Any) -> list[Any]:\n return list(args[0] if (len(args) == 1 and _is_iterable(args[0])) else args)\n\n\ndef tupleify(arg: Any) -> Any:\n if not isinstance(arg, (list, tuple)): # pragma: no cover\n return (arg,)\n return arg\n\n\ndef _is_iterable(arg: Any | Iterable[Any]) -> bool:\n from narwhals.series import Series\n\n if (\n (pd := get_pandas()) is not None and isinstance(arg, (pd.Series, pd.DataFrame))\n ) or (\n (pl := get_polars()) is not None\n and isinstance(arg, (pl.Series, pl.Expr, pl.DataFrame, pl.LazyFrame))\n ):\n # Non-exhaustive check for common potential mistakes.\n msg = (\n f"Expected Narwhals class or scalar, got: {qualified_type_name(arg)!r}.\n\n"\n "Hint: Perhaps you\n"\n "- forgot a `nw.from_native` somewhere?\n"\n "- used `pl.col` instead of `nw.col`?"\n )\n raise TypeError(msg)\n\n return isinstance(arg, Iterable) and not isinstance(arg, (str, bytes, Series))\n\n\ndef parse_version(version: str | ModuleType | _SupportsVersion) -> tuple[int, ...]:\n """Simple version parser; split into a tuple of ints for comparison.\n\n Arguments:\n version: Version string, or object with one, to parse.\n\n Returns:\n Parsed version number.\n """\n # lifted from Polars\n # [marco]: Take care of DuckDB pre-releases which end with e.g. `-dev4108`\n # and pandas pre-releases which end with e.g. 
.dev0+618.gb552dc95c9\n version_str = version if isinstance(version, str) else version.__version__\n version_str = re.sub(r"(\D?dev.*$)", "", version_str)\n return tuple(int(re.sub(r"\D", "", v)) for v in version_str.split("."))\n\n\n@overload\ndef isinstance_or_issubclass(\n obj_or_cls: type, cls_or_tuple: type[_T]\n) -> TypeIs[type[_T]]: ...\n\n\n@overload\ndef isinstance_or_issubclass(\n obj_or_cls: object | type, cls_or_tuple: type[_T]\n) -> TypeIs[_T | type[_T]]: ...\n\n\n@overload\ndef isinstance_or_issubclass(\n obj_or_cls: type, cls_or_tuple: tuple[type[_T1], type[_T2]]\n) -> TypeIs[type[_T1 | _T2]]: ...\n\n\n@overload\ndef isinstance_or_issubclass(\n obj_or_cls: object | type, cls_or_tuple: tuple[type[_T1], type[_T2]]\n) -> TypeIs[_T1 | _T2 | type[_T1 | _T2]]: ...\n\n\n@overload\ndef isinstance_or_issubclass(\n obj_or_cls: type, cls_or_tuple: tuple[type[_T1], type[_T2], type[_T3]]\n) -> TypeIs[type[_T1 | _T2 | _T3]]: ...\n\n\n@overload\ndef isinstance_or_issubclass(\n obj_or_cls: object | type, cls_or_tuple: tuple[type[_T1], type[_T2], type[_T3]]\n) -> TypeIs[_T1 | _T2 | _T3 | type[_T1 | _T2 | _T3]]: ...\n\n\n@overload\ndef isinstance_or_issubclass(\n obj_or_cls: type, cls_or_tuple: tuple[type[_T1], type[_T2], type[_T3], type[_T4]]\n) -> TypeIs[type[_T1 | _T2 | _T3 | _T4]]: ...\n\n\n@overload\ndef isinstance_or_issubclass(\n obj_or_cls: object | type,\n cls_or_tuple: tuple[type[_T1], type[_T2], type[_T3], type[_T4]],\n) -> TypeIs[_T1 | _T2 | _T3 | _T4 | type[_T1 | _T2 | _T3 | _T4]]: ...\n\n\n@overload\ndef isinstance_or_issubclass(\n obj_or_cls: type,\n cls_or_tuple: tuple[type[_T1], type[_T2], type[_T3], type[_T4], type[_T5]],\n) -> TypeIs[type[_T1 | _T2 | _T3 | _T4 | _T5]]: ...\n\n\n@overload\ndef isinstance_or_issubclass(\n obj_or_cls: object | type,\n cls_or_tuple: tuple[type[_T1], type[_T2], type[_T3], type[_T4], type[_T5]],\n) -> TypeIs[_T1 | _T2 | _T3 | _T4 | _T5 | type[_T1 | _T2 | _T3 | _T4 | _T5]]: ...\n\n\n@overload\ndef isinstance_or_issubclass(\n obj_or_cls: type,\n cls_or_tuple: tuple[type[_T1], type[_T2], type[_T3], type[_T4], type[_T5], type[_T6]],\n) -> TypeIs[type[_T1 | _T2 | _T3 | _T4 | _T5 | _T6]]: ...\n\n\n@overload\ndef isinstance_or_issubclass(\n obj_or_cls: object | type,\n cls_or_tuple: tuple[type[_T1], type[_T2], type[_T3], type[_T4], type[_T5], type[_T6]],\n) -> TypeIs[\n _T1 | _T2 | _T3 | _T4 | _T5 | _T6 | type[_T1 | _T2 | _T3 | _T4 | _T5 | _T6]\n]: ...\n\n\n@overload\ndef isinstance_or_issubclass(\n obj_or_cls: type,\n cls_or_tuple: tuple[\n type[_T1], type[_T2], type[_T3], type[_T4], type[_T5], type[_T6], type[_T7]\n ],\n) -> TypeIs[type[_T1 | _T2 | _T3 | _T4 | _T5 | _T6 | _T7]]: ...\n\n\n@overload\ndef isinstance_or_issubclass(\n obj_or_cls: object | type,\n cls_or_tuple: tuple[\n type[_T1], type[_T2], type[_T3], type[_T4], type[_T5], type[_T6], type[_T7]\n ],\n) -> TypeIs[\n _T1\n | _T2\n | _T3\n | _T4\n | _T5\n | _T6\n | _T7\n | type[_T1 | _T2 | _T3 | _T4 | _T5 | _T6 | _T7]\n]: ...\n\n\n@overload\ndef isinstance_or_issubclass(\n obj_or_cls: Any, cls_or_tuple: tuple[type, ...]\n) -> TypeIs[Any]: ...\n\n\ndef isinstance_or_issubclass(obj_or_cls: Any, cls_or_tuple: Any) -> bool:\n from narwhals.dtypes import DType\n\n if isinstance(obj_or_cls, DType):\n return isinstance(obj_or_cls, cls_or_tuple)\n return isinstance(obj_or_cls, cls_or_tuple) or (\n isinstance(obj_or_cls, type) and issubclass(obj_or_cls, cls_or_tuple)\n )\n\n\ndef validate_laziness(items: Iterable[Any]) -> None:\n from narwhals.dataframe import DataFrame, LazyFrame\n\n if 
all(isinstance(item, DataFrame) for item in items) or (\n all(isinstance(item, LazyFrame) for item in items)\n ):\n return\n msg = f"The items to concatenate should either all be eager, or all lazy, got: {[type(item) for item in items]}"\n raise TypeError(msg)\n\n\ndef maybe_align_index(\n lhs: FrameOrSeriesT, rhs: Series[Any] | DataFrame[Any] | LazyFrame[Any]\n) -> FrameOrSeriesT:\n """Align `lhs` to the Index of `rhs`, if they're both pandas-like.\n\n Arguments:\n lhs: Dataframe or Series.\n rhs: Dataframe or Series to align with.\n\n Returns:\n Same type as input.\n\n Notes:\n This is only really intended for backwards-compatibility purposes,\n for example if your library already aligns indices for users.\n If you're designing a new library, we highly encourage you to not\n rely on the Index.\n For non-pandas-like inputs, this only checks that `lhs` and `rhs`\n are the same length.\n\n Examples:\n >>> import pandas as pd\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_pd = pd.DataFrame({"a": [1, 2]}, index=[3, 4])\n >>> s_pd = pd.Series([6, 7], index=[4, 3])\n >>> df = nw.from_native(df_pd)\n >>> s = nw.from_native(s_pd, series_only=True)\n >>> nw.to_native(nw.maybe_align_index(df, s))\n a\n 4 2\n 3 1\n """\n from narwhals._pandas_like.dataframe import PandasLikeDataFrame\n from narwhals._pandas_like.series import PandasLikeSeries\n\n def _validate_index(index: Any) -> None:\n if not index.is_unique:\n msg = "given index doesn't have unique values"\n raise ValueError(msg)\n\n lhs_any = cast("Any", lhs)\n rhs_any = cast("Any", rhs)\n if isinstance(\n getattr(lhs_any, "_compliant_frame", None), PandasLikeDataFrame\n ) and isinstance(getattr(rhs_any, "_compliant_frame", None), PandasLikeDataFrame):\n _validate_index(lhs_any._compliant_frame.native.index)\n _validate_index(rhs_any._compliant_frame.native.index)\n return lhs_any._with_compliant(\n lhs_any._compliant_frame._with_native(\n lhs_any._compliant_frame.native.loc[rhs_any._compliant_frame.native.index]\n )\n )\n if isinstance(\n getattr(lhs_any, "_compliant_frame", None), PandasLikeDataFrame\n ) and isinstance(getattr(rhs_any, "_compliant_series", None), PandasLikeSeries):\n _validate_index(lhs_any._compliant_frame.native.index)\n _validate_index(rhs_any._compliant_series.native.index)\n return lhs_any._with_compliant(\n lhs_any._compliant_frame._with_native(\n lhs_any._compliant_frame.native.loc[\n rhs_any._compliant_series.native.index\n ]\n )\n )\n if isinstance(\n getattr(lhs_any, "_compliant_series", None), PandasLikeSeries\n ) and isinstance(getattr(rhs_any, "_compliant_frame", None), PandasLikeDataFrame):\n _validate_index(lhs_any._compliant_series.native.index)\n _validate_index(rhs_any._compliant_frame.native.index)\n return lhs_any._with_compliant(\n lhs_any._compliant_series._with_native(\n lhs_any._compliant_series.native.loc[\n rhs_any._compliant_frame.native.index\n ]\n )\n )\n if isinstance(\n getattr(lhs_any, "_compliant_series", None), PandasLikeSeries\n ) and isinstance(getattr(rhs_any, "_compliant_series", None), PandasLikeSeries):\n _validate_index(lhs_any._compliant_series.native.index)\n _validate_index(rhs_any._compliant_series.native.index)\n return lhs_any._with_compliant(\n lhs_any._compliant_series._with_native(\n lhs_any._compliant_series.native.loc[\n rhs_any._compliant_series.native.index\n ]\n )\n )\n if len(lhs_any) != len(rhs_any):\n msg = f"Expected `lhs` and `rhs` to have the same length, got {len(lhs_any)} and {len(rhs_any)}"\n raise ValueError(msg)\n return lhs\n\n\ndef 
maybe_get_index(obj: DataFrame[Any] | LazyFrame[Any] | Series[Any]) -> Any | None:\n """Get the index of a DataFrame or a Series, if it's pandas-like.\n\n Arguments:\n obj: Dataframe or Series.\n\n Returns:\n The native index if the input is pandas-like, otherwise `None`.\n\n Notes:\n This is only really intended for backwards-compatibility purposes,\n for example if your library already aligns indices for users.\n If you're designing a new library, we highly encourage you to not\n rely on the Index.\n For non-pandas-like inputs, this returns `None`.\n\n Examples:\n >>> import pandas as pd\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_pd = pd.DataFrame({"a": [1, 2], "b": [4, 5]})\n >>> df = nw.from_native(df_pd)\n >>> nw.maybe_get_index(df)\n RangeIndex(start=0, stop=2, step=1)\n >>> series_pd = pd.Series([1, 2])\n >>> series = nw.from_native(series_pd, series_only=True)\n >>> nw.maybe_get_index(series)\n RangeIndex(start=0, stop=2, step=1)\n """\n obj_any = cast("Any", obj)\n native_obj = obj_any.to_native()\n if is_pandas_like_dataframe(native_obj) or is_pandas_like_series(native_obj):\n return native_obj.index\n return None\n\n\ndef maybe_set_index(\n obj: FrameOrSeriesT,\n column_names: str | list[str] | None = None,\n *,\n index: Series[IntoSeriesT] | list[Series[IntoSeriesT]] | None = None,\n) -> FrameOrSeriesT:\n """Set the index of a DataFrame or a Series, if it's pandas-like.\n\n Arguments:\n obj: object for which to maybe set the index (can be either a Narwhals `DataFrame`\n or `Series`).\n column_names: name or list of names of the columns to set as index.\n For dataframes, only one of `column_names` and `index` can be specified but\n not both. If `column_names` is passed and `obj` is a Series, then a\n `ValueError` is raised.\n index: series or list of series to set as index.\n\n Returns:\n Same type as input.\n\n Raises:\n ValueError: If one of the following conditions happens\n\n - none of `column_names` and `index` are provided\n - both `column_names` and `index` are provided\n - `column_names` is provided and `obj` is a Series\n\n Notes:\n This is only really intended for backwards-compatibility purposes, for example if\n your library already aligns indices for users.\n If you're designing a new library, we highly encourage you to not\n rely on the Index.\n\n For non-pandas-like inputs, this is a no-op.\n\n Examples:\n >>> import pandas as pd\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_pd = pd.DataFrame({"a": [1, 2], "b": [4, 5]})\n >>> df = nw.from_native(df_pd)\n >>> nw.to_native(nw.maybe_set_index(df, "b")) # doctest: +NORMALIZE_WHITESPACE\n a\n b\n 4 1\n 5 2\n """\n from narwhals.translate import to_native\n\n df_any = cast("Any", obj)\n native_obj = df_any.to_native()\n\n if column_names is not None and index is not None:\n msg = "Only one of `column_names` or `index` should be provided"\n raise ValueError(msg)\n\n if not column_names and index is None:\n msg = "Either `column_names` or `index` should be provided"\n raise ValueError(msg)\n\n if index is not None:\n keys = (\n [to_native(idx, pass_through=True) for idx in index]\n if _is_iterable(index)\n else to_native(index, pass_through=True)\n )\n else:\n keys = column_names\n\n if is_pandas_like_dataframe(native_obj):\n return df_any._with_compliant(\n df_any._compliant_frame._with_native(native_obj.set_index(keys))\n )\n elif is_pandas_like_series(native_obj):\n from narwhals._pandas_like.utils import set_index\n\n if column_names:\n msg = "Cannot set index using column names on a Series"\n raise ValueError(msg)\n\n native_obj = set_index(
\n native_obj,\n keys,\n implementation=obj._compliant_series._implementation, # type: ignore[union-attr]\n backend_version=obj._compliant_series._backend_version, # type: ignore[union-attr]\n )\n return df_any._with_compliant(df_any._compliant_series._with_native(native_obj))\n else:\n return df_any\n\n\ndef maybe_reset_index(obj: FrameOrSeriesT) -> FrameOrSeriesT:\n """Reset the index to the default integer index of a DataFrame or a Series, if it's pandas-like.\n\n Arguments:\n obj: Dataframe or Series.\n\n Returns:\n Same type as input.\n\n Notes:\n This is only really intended for backwards-compatibility purposes,\n for example if your library already resets the index for users.\n If you're designing a new library, we highly encourage you to not\n rely on the Index.\n For non-pandas-like inputs, this is a no-op.\n\n Examples:\n >>> import pandas as pd\n >>> import polars as pl\n >>> import narwhals as nw\n >>> df_pd = pd.DataFrame({"a": [1, 2], "b": [4, 5]}, index=([6, 7]))\n >>> df = nw.from_native(df_pd)\n >>> nw.to_native(nw.maybe_reset_index(df))\n a b\n 0 1 4\n 1 2 5\n >>> series_pd = pd.Series([1, 2])\n >>> series = nw.from_native(series_pd, series_only=True)\n >>> nw.maybe_get_index(nw.maybe_reset_index(series))\n RangeIndex(start=0, stop=2, step=1)\n """\n obj_any = cast("Any", obj)\n native_obj = obj_any.to_native()\n if is_pandas_like_dataframe(native_obj):\n native_namespace = obj_any.__native_namespace__()\n if _has_default_index(native_obj, native_namespace):\n return obj_any\n return obj_any._with_compliant(\n obj_any._compliant_frame._with_native(native_obj.reset_index(drop=True))\n )\n if is_pandas_like_series(native_obj):\n native_namespace = obj_any.__native_namespace__()\n if _has_default_index(native_obj, native_namespace):\n return obj_any\n return obj_any._with_compliant(\n obj_any._compliant_series._with_native(native_obj.reset_index(drop=True))\n )\n return obj_any\n\n\ndef _is_range_index(obj: Any, native_namespace: Any) -> TypeIs[pd.RangeIndex]:\n return isinstance(obj, native_namespace.RangeIndex)\n\n\ndef _has_default_index(\n native_frame_or_series: pd.Series[Any] | pd.DataFrame, native_namespace: Any\n) -> bool:\n index = native_frame_or_series.index\n return (\n _is_range_index(index, native_namespace)\n and index.start == 0\n and index.stop == len(index)\n and index.step == 1\n )\n\n\ndef maybe_convert_dtypes(\n obj: FrameOrSeriesT, *args: bool, **kwargs: bool | str\n) -> FrameOrSeriesT:\n """Convert columns or series to the best possible dtypes using dtypes supporting ``pd.NA``, if `obj` is pandas-like.\n\n Arguments:\n obj: DataFrame or Series.\n *args: Additional arguments which get passed through.\n **kwargs: Additional arguments which get passed through.\n\n Returns:\n Same type as input.\n\n Notes:\n For non-pandas-like inputs, this is a no-op.\n Also, `args` and `kwargs` just get passed down to the underlying library as-is.\n\n Examples:\n >>> import pandas as pd\n >>> import polars as pl\n >>> import narwhals as nw\n >>> import numpy as np\n >>> df_pd = pd.DataFrame(\n ... {\n ... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),\n ... "b": pd.Series([True, False, np.nan], dtype=np.dtype("O")),\n ... }\n ... )\n >>> df = nw.from_native(df_pd)\n >>> nw.to_native(\n ... nw.maybe_convert_dtypes(df)\n ... 
).dtypes # doctest: +NORMALIZE_WHITESPACE\n a Int32\n b boolean\n dtype: object\n """\n obj_any = cast("Any", obj)\n native_obj = obj_any.to_native()\n if is_pandas_like_dataframe(native_obj):\n return obj_any._with_compliant(\n obj_any._compliant_frame._with_native(\n native_obj.convert_dtypes(*args, **kwargs)\n )\n )\n if is_pandas_like_series(native_obj):\n return obj_any._with_compliant(\n obj_any._compliant_series._with_native(\n native_obj.convert_dtypes(*args, **kwargs)\n )\n )\n return obj_any\n\n\ndef scale_bytes(sz: int, unit: SizeUnit) -> int | float:\n """Scale size in bytes to other size units (e.g. "kb", "mb", "gb", "tb").\n\n Arguments:\n sz: original size in bytes\n unit: size unit to convert into\n\n Returns:\n Integer or float.\n """\n if unit in {"b", "bytes"}:\n return sz\n elif unit in {"kb", "kilobytes"}:\n return sz / 1024\n elif unit in {"mb", "megabytes"}:\n return sz / 1024**2\n elif unit in {"gb", "gigabytes"}:\n return sz / 1024**3\n elif unit in {"tb", "terabytes"}:\n return sz / 1024**4\n else:\n msg = f"`unit` must be one of {{'b', 'kb', 'mb', 'gb', 'tb'}}, got {unit!r}"\n raise ValueError(msg)\n\n\ndef is_ordered_categorical(series: Series[Any]) -> bool:\n """Return whether indices of categories are semantically meaningful.\n\n This is a convenience function for accessing what would otherwise be\n the `is_ordered` property from the DataFrame Interchange Protocol,\n see https://data-apis.org/dataframe-protocol/latest/API.html.\n\n - For Polars:\n - Enums are always ordered.\n - Categoricals are ordered if `dtype.ordering == "physical"`.\n - For pandas-like APIs:\n - Categoricals are ordered if `dtype.cat.ordered == True`.\n - For PyArrow table:\n - Categoricals are ordered if `dtype.type.ordered == True`.\n\n Arguments:\n series: Input Series.\n\n Returns:\n Whether the Series is an ordered categorical.\n\n Examples:\n >>> import narwhals as nw\n >>> import pandas as pd\n >>> import polars as pl\n >>> data = ["x", "y"]\n >>> s_pd = pd.Series(data, dtype=pd.CategoricalDtype(ordered=True))\n >>> s_pl = pl.Series(data, dtype=pl.Categorical(ordering="physical"))\n\n Let's define a library-agnostic function:\n\n >>> @nw.narwhalify\n ... def func(s):\n ... return nw.is_ordered_categorical(s)\n\n Then, we can pass any supported library to `func`:\n\n >>> func(s_pd)\n True\n >>> func(s_pl)\n True\n """\n from narwhals._interchange.series import InterchangeSeries\n\n dtypes = series._compliant_series._version.dtypes\n compliant = series._compliant_series\n # If it doesn't match any branches, let's just play it safe and return False.\n result: bool = False\n if isinstance(compliant, InterchangeSeries) and isinstance(\n series.dtype, dtypes.Categorical\n ):\n result = compliant.native.describe_categorical["is_ordered"]\n elif series.dtype == dtypes.Enum:\n result = True\n elif series.dtype != dtypes.Categorical:\n result = False\n else:\n native = series.to_native()\n if is_polars_series(native):\n result = cast("pl.Categorical", native.dtype).ordering == "physical"\n elif is_pandas_like_series(native):\n result = bool(native.cat.ordered)\n elif is_pyarrow_chunked_array(native):\n from narwhals._arrow.utils import is_dictionary\n\n result = is_dictionary(native.type) and native.type.ordered\n return result\n\n\ndef generate_unique_token(\n n_bytes: int, columns: Container[str]\n) -> str: # pragma: no cover\n msg = (\n "Use `generate_temporary_column_name` instead. 
`generate_unique_token` is "\n "deprecated and it will be removed in future versions"\n )\n issue_deprecation_warning(msg, _version="1.13.0")\n return generate_temporary_column_name(n_bytes=n_bytes, columns=columns)\n\n\ndef generate_temporary_column_name(n_bytes: int, columns: Container[str]) -> str:\n """Generates a unique column name that is not present in the given list of columns.\n\n It relies on the [python secrets token_hex](https://docs.python.org/3/library/secrets.html#secrets.token_hex)\n function to return a random string of `n_bytes` bytes.\n\n Arguments:\n n_bytes: The number of bytes to generate for the token.\n columns: The list of columns to check for uniqueness.\n\n Returns:\n A unique token that is not present in the given list of columns.\n\n Raises:\n AssertionError: If a unique token cannot be generated after 100 attempts.\n\n Examples:\n >>> import narwhals as nw\n >>> columns = ["abc", "xyz"]\n >>> nw.generate_temporary_column_name(n_bytes=8, columns=columns) not in columns\n True\n """\n counter = 0\n while True:\n # Prepend `'nw'` to ensure it always starts with a character\n # https://github.com/narwhals-dev/narwhals/issues/2510\n token = f"nw{token_hex(n_bytes - 1)}"\n if token not in columns:\n return token\n\n counter += 1\n if counter > 100:\n msg = (\n "Internal Error: Narwhals was not able to generate a column name with "\n f"{n_bytes=} and not in {columns}"\n )\n raise AssertionError(msg)\n\n\ndef parse_columns_to_drop(\n frame: _StoresColumns, subset: Iterable[str], /, *, strict: bool\n) -> list[str]:\n if not strict:\n return list(set(frame.columns).intersection(subset))\n to_drop = list(subset)\n if error := check_columns_exist(to_drop, available=frame.columns):\n raise error\n return to_drop\n\n\ndef is_sequence_but_not_str(sequence: Sequence[_T] | Any) -> TypeIs[Sequence[_T]]:\n return isinstance(sequence, Sequence) and not isinstance(sequence, str)\n\n\ndef is_slice_none(obj: Any) -> TypeIs[_SliceNone]:\n return isinstance(obj, slice) and obj == slice(None)\n\n\ndef is_sized_multi_index_selector(\n obj: Any,\n) -> TypeIs[SizedMultiIndexSelector[Series[Any] | CompliantSeries[Any]]]:\n return (\n (\n is_sequence_but_not_str(obj)\n and ((len(obj) > 0 and isinstance(obj[0], int)) or (len(obj) == 0))\n )\n or is_numpy_array_1d_int(obj)\n or is_narwhals_series_int(obj)\n or is_compliant_series_int(obj)\n )\n\n\ndef is_sequence_like(\n obj: Sequence[_T] | Any,\n) -> TypeIs[Sequence[_T] | Series[Any] | _1DArray]:\n return (\n is_sequence_but_not_str(obj)\n or is_numpy_array_1d(obj)\n or is_narwhals_series(obj)\n or is_compliant_series(obj)\n )\n\n\ndef is_slice_index(obj: Any) -> TypeIs[_SliceIndex]:\n return isinstance(obj, slice) and (\n isinstance(obj.start, int)\n or isinstance(obj.stop, int)\n or (isinstance(obj.step, int) and obj.start is None and obj.stop is None)\n )\n\n\ndef is_range(obj: Any) -> TypeIs[range]:\n return isinstance(obj, range)\n\n\ndef is_single_index_selector(obj: Any) -> TypeIs[SingleIndexSelector]:\n return bool(isinstance(obj, int) and not isinstance(obj, bool))\n\n\ndef is_index_selector(\n obj: Any,\n) -> TypeIs[SingleIndexSelector | MultiIndexSelector[Series[Any] | CompliantSeries[Any]]]:\n return (\n is_single_index_selector(obj)\n or is_sized_multi_index_selector(obj)\n or is_slice_index(obj)\n )\n\n\ndef is_list_of(obj: Any, tp: type[_T]) -> TypeIs[list[_T]]:\n # Check if an object is a list of `tp`, only sniffing the first element.\n return bool(isinstance(obj, list) and obj and isinstance(obj[0], tp))\n\n\ndef is_sequence_of(obj: 
Any, tp: type[_T]) -> TypeIs[Sequence[_T]]:\n # Check if an object is a sequence of `tp`, only sniffing the first element.\n return bool(\n is_sequence_but_not_str(obj)\n and (first := next(iter(obj), None))\n and isinstance(first, tp)\n )\n\n\ndef find_stacklevel() -> int:\n """Find the first place in the stack that is not inside narwhals.\n\n Returns:\n Stacklevel.\n\n Taken from:\n https://github.com/pandas-dev/pandas/blob/ab89c53f48df67709a533b6a95ce3d911871a0a8/pandas/util/_exceptions.py#L30-L51\n """\n import inspect\n from pathlib import Path\n\n import narwhals as nw\n\n pkg_dir = str(Path(nw.__file__).parent)\n\n # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow\n frame = inspect.currentframe()\n n = 0\n try:\n while frame:\n fname = inspect.getfile(frame)\n if fname.startswith(pkg_dir) or (\n (qualname := getattr(frame.f_code, "co_qualname", None))\n # ignore @singledispatch wrappers\n and qualname.startswith("singledispatch.")\n ):\n frame = frame.f_back\n n += 1\n else: # pragma: no cover\n break\n else: # pragma: no cover\n pass\n finally:\n # https://docs.python.org/3/library/inspect.html\n # > Though the cycle detector will catch these, destruction of the frames\n # > (and local variables) can be made deterministic by removing the cycle\n # > in a finally clause.\n del frame\n return n\n\n\ndef issue_deprecation_warning(message: str, _version: str) -> None:\n """Issue a deprecation warning.\n\n Arguments:\n message: The message associated with the warning.\n _version: Narwhals version when the warning was introduced. Just used for internal\n bookkeeping.\n """\n warn(message=message, category=DeprecationWarning, stacklevel=find_stacklevel())\n\n\ndef validate_strict_and_pass_though(\n strict: bool | None, # noqa: FBT001\n pass_through: bool | None, # noqa: FBT001\n *,\n pass_through_default: bool,\n emit_deprecation_warning: bool,\n) -> bool:\n if strict is None and pass_through is None:\n pass_through = pass_through_default\n elif strict is not None and pass_through is None:\n if emit_deprecation_warning:\n msg = (\n "`strict` in `from_native` is deprecated, please use `pass_through` instead.\n\n"\n "Note: `strict` will remain available in `narwhals.stable.v1`.\n"\n "See https://narwhals-dev.github.io/narwhals/backcompat/ for more information.\n"\n )\n issue_deprecation_warning(msg, _version="1.13.0")\n pass_through = not strict\n elif strict is None and pass_through is not None:\n pass\n else:\n msg = "Cannot pass both `strict` and `pass_through`"\n raise ValueError(msg)\n return pass_through\n\n\ndef deprecate_native_namespace(\n *, warn_version: str = "", required: bool = False\n) -> Callable[[Callable[P, R]], Callable[P, R]]:\n """Decorator to transition from `native_namespace` to `backend` argument.\n\n Arguments:\n warn_version: Emit a deprecation warning from this version.\n required: Raise when both `native_namespace`, `backend` are `None`.\n\n Returns:\n Wrapped function, with `native_namespace` **removed**.\n """\n\n def decorate(fn: Callable[P, R], /) -> Callable[P, R]:\n @wraps(fn)\n def wrapper(*args: P.args, **kwds: P.kwargs) -> R:\n backend = kwds.pop("backend", None)\n native_namespace = kwds.pop("native_namespace", None)\n if native_namespace is not None and backend is None:\n if warn_version:\n msg = (\n "`native_namespace` is deprecated, please use `backend` instead.\n\n"\n "Note: `native_namespace` will remain available in `narwhals.stable.v1`.\n"\n "See https://narwhals-dev.github.io/narwhals/backcompat/ for more 
information.\n"\n )\n issue_deprecation_warning(msg, _version=warn_version)\n backend = native_namespace\n elif native_namespace is not None and backend is not None:\n msg = "Can't pass both `native_namespace` and `backend`"\n raise ValueError(msg)\n elif native_namespace is None and backend is None and required:\n msg = f"`backend` must be specified in `{fn.__name__}`."\n raise ValueError(msg)\n kwds["backend"] = backend\n return fn(*args, **kwds)\n\n return wrapper\n\n return decorate\n\n\ndef _validate_rolling_arguments(\n window_size: int, min_samples: int | None\n) -> tuple[int, int]:\n ensure_type(window_size, int, param_name="window_size")\n ensure_type(min_samples, int, type(None), param_name="min_samples")\n\n if window_size < 1:\n msg = "window_size must be greater than or equal to 1"\n raise ValueError(msg)\n\n if min_samples is not None:\n if min_samples < 1:\n msg = "min_samples must be greater than or equal to 1"\n raise ValueError(msg)\n\n if min_samples > window_size:\n msg = "`min_samples` must be less than or equal to `window_size`"\n raise InvalidOperationError(msg)\n else:\n min_samples = window_size\n\n return window_size, min_samples\n\n\ndef generate_repr(header: str, native_repr: str) -> str:\n try:\n terminal_width = os.get_terminal_size().columns\n except OSError:\n terminal_width = int(os.getenv("COLUMNS", 80)) # noqa: PLW1508\n native_lines = native_repr.expandtabs().splitlines()\n max_native_width = max(len(line) for line in native_lines)\n\n if max_native_width + 2 <= terminal_width:\n length = max(max_native_width, len(header))\n output = f"┌{'─' * length}┐\n"\n header_extra = length - len(header)\n output += f"|{' ' * (header_extra // 2)}{header}{' ' * (header_extra // 2 + header_extra % 2)}|\n"\n output += f"|{'-' * (length)}|\n"\n start_extra = (length - max_native_width) // 2\n end_extra = (length - max_native_width) // 2 + (length - max_native_width) % 2\n for line in native_lines:\n output += f"|{' ' * (start_extra)}{line}{' ' * (end_extra + max_native_width - len(line))}|\n"\n output += f"└{'─' * length}┘"\n return output\n\n diff = 39 - len(header)\n return (\n f"┌{'─' * (39)}┐\n"\n f"|{' ' * (diff // 2)}{header}{' ' * (diff // 2 + diff % 2)}|\n"\n "| Use `.to_native` to see native output |\n└"\n f"{'─' * 39}┘"\n )\n\n\ndef check_columns_exist(\n subset: Collection[str], /, *, available: Collection[str]\n) -> ColumnNotFoundError | None:\n if missing := set(subset).difference(available):\n return ColumnNotFoundError.from_missing_and_available_column_names(\n missing, available\n )\n return None\n\n\ndef check_column_names_are_unique(columns: Collection[str]) -> None:\n if len(columns) != len(set(columns)):\n from collections import Counter\n\n counter = Counter(columns)\n duplicates = {k: v for k, v in counter.items() if v > 1}\n msg = "".join(f"\n- '{k}' {v} times" for k, v in duplicates.items())\n msg = f"Expected unique column names, got:{msg}"\n raise DuplicateError(msg)\n\n\ndef _parse_time_unit_and_time_zone(\n time_unit: TimeUnit | Iterable[TimeUnit] | None,\n time_zone: str | timezone | Iterable[str | timezone | None] | None,\n) -> tuple[Set[TimeUnit], Set[str | None]]:\n time_units: Set[TimeUnit] = (\n {"ms", "us", "ns", "s"}\n if time_unit is None\n else {time_unit}\n if isinstance(time_unit, str)\n else set(time_unit)\n )\n time_zones: Set[str | None] = (\n {None}\n if time_zone is None\n else {str(time_zone)}\n if isinstance(time_zone, (str, timezone))\n else {str(tz) if tz is not None else None for tz in time_zone}\n )\n return time_units, 
time_zones\n\n\ndef dtype_matches_time_unit_and_time_zone(\n dtype: DType, dtypes: DTypes, time_units: Set[TimeUnit], time_zones: Set[str | None]\n) -> bool:\n return (\n isinstance(dtype, dtypes.Datetime)\n and (dtype.time_unit in time_units)\n and (\n dtype.time_zone in time_zones\n or ("*" in time_zones and dtype.time_zone is not None)\n )\n )\n\n\ndef get_column_names(frame: _StoresColumns, /) -> Sequence[str]:\n return frame.columns\n\n\ndef exclude_column_names(frame: _StoresColumns, names: Container[str]) -> Sequence[str]:\n return [col_name for col_name in frame.columns if col_name not in names]\n\n\ndef passthrough_column_names(names: Sequence[str], /) -> EvalNames[Any]:\n def fn(_frame: Any, /) -> Sequence[str]:\n return names\n\n return fn\n\n\ndef _hasattr_static(obj: Any, attr: str) -> bool:\n sentinel = object()\n return getattr_static(obj, attr, sentinel) is not sentinel\n\n\ndef is_compliant_dataframe(\n obj: CompliantDataFrame[\n CompliantSeriesT, CompliantExprT, NativeFrameT_co, ToNarwhalsT_co\n ]\n | Any,\n) -> TypeIs[\n CompliantDataFrame[CompliantSeriesT, CompliantExprT, NativeFrameT_co, ToNarwhalsT_co]\n]:\n return _hasattr_static(obj, "__narwhals_dataframe__")\n\n\ndef is_compliant_lazyframe(\n obj: CompliantLazyFrame[CompliantExprT, NativeFrameT_co, ToNarwhalsT_co] | Any,\n) -> TypeIs[CompliantLazyFrame[CompliantExprT, NativeFrameT_co, ToNarwhalsT_co]]:\n return _hasattr_static(obj, "__narwhals_lazyframe__")\n\n\ndef is_compliant_series(\n obj: CompliantSeries[NativeSeriesT_co] | Any,\n) -> TypeIs[CompliantSeries[NativeSeriesT_co]]:\n return _hasattr_static(obj, "__narwhals_series__")\n\n\ndef is_compliant_series_int(\n obj: CompliantSeries[NativeSeriesT_co] | Any,\n) -> TypeIs[CompliantSeries[NativeSeriesT_co]]:\n return is_compliant_series(obj) and obj.dtype.is_integer()\n\n\ndef is_compliant_expr(\n obj: CompliantExpr[CompliantFrameT, CompliantSeriesOrNativeExprT_co] | Any,\n) -> TypeIs[CompliantExpr[CompliantFrameT, CompliantSeriesOrNativeExprT_co]]:\n return hasattr(obj, "__narwhals_expr__")\n\n\ndef is_eager_allowed(obj: Implementation) -> TypeIs[EagerAllowedImplementation]:\n return obj in {\n Implementation.PANDAS,\n Implementation.MODIN,\n Implementation.CUDF,\n Implementation.POLARS,\n Implementation.PYARROW,\n }\n\n\ndef has_native_namespace(obj: Any) -> TypeIs[SupportsNativeNamespace]:\n return hasattr(obj, "__native_namespace__")\n\n\ndef _supports_dataframe_interchange(obj: Any) -> TypeIs[DataFrameLike]:\n return hasattr(obj, "__dataframe__")\n\n\ndef supports_arrow_c_stream(obj: Any) -> TypeIs[ArrowStreamExportable]:\n return _hasattr_static(obj, "__arrow_c_stream__")\n\n\ndef _remap_full_join_keys(\n left_on: Collection[str], right_on: Collection[str], suffix: str\n) -> dict[str, str]:\n """Remap join keys to avoid collisions.\n\n If left keys collide with the right keys, append the suffix.\n If there's no collision, let the right keys be.\n\n Arguments:\n left_on: Left keys.\n right_on: Right keys.\n suffix: Suffix to append to right keys.\n\n Returns:\n A map of old to new right keys.\n """\n right_keys_suffixed = (\n f"{key}{suffix}" if key in left_on else key for key in right_on\n )\n return dict(zip(right_on, right_keys_suffixed))\n\n\ndef _into_arrow_table(data: IntoArrowTable, context: _FullContext, /) -> pa.Table:\n """Guards `ArrowDataFrame.from_arrow` w/ safer imports.\n\n Arguments:\n data: Object which implements `__arrow_c_stream__`.\n context: Initialized compliant object.\n\n Returns:\n A PyArrow Table.\n """\n if 
find_spec("pyarrow"):\n import pyarrow as pa # ignore-banned-import\n\n from narwhals._arrow.namespace import ArrowNamespace\n\n version = context._version\n ns = ArrowNamespace(backend_version=parse_version(pa), version=version)\n return ns._dataframe.from_arrow(data, context=ns).native\n else: # pragma: no cover\n msg = f"'pyarrow>=14.0.0' is required for `from_arrow` for object of type {qualified_type_name(data)!r}."\n raise ModuleNotFoundError(msg)\n\n\n# TODO @dangotbanned: Extend with runtime behavior for `v1.*`\n# See `narwhals.exceptions.NarwhalsUnstableWarning`\ndef unstable(fn: _Fn, /) -> _Fn:\n """Visual-only marker for unstable functionality.\n\n Arguments:\n fn: Function to decorate.\n\n Returns:\n Decorated function (unchanged).\n\n Examples:\n >>> from narwhals._utils import unstable\n >>> @unstable\n ... def a_work_in_progress_feature(*args):\n ... return args\n >>>\n >>> a_work_in_progress_feature.__name__\n 'a_work_in_progress_feature'\n >>> a_work_in_progress_feature(1, 2, 3)\n (1, 2, 3)\n """\n return fn\n\n\ndef _is_naive_format(format: str) -> bool:\n """Determines if a datetime format string is 'naive', i.e., does not include timezone information.\n\n A format is considered naive if it does not contain any of the following\n\n - '%s': Unix timestamp\n - '%z': UTC offset\n - 'Z' : UTC timezone designator\n\n Arguments:\n format: The datetime format string to check.\n\n Returns:\n bool: True if the format is naive (does not include timezone info), False otherwise.\n """\n return not any(x in format for x in ("%s", "%z", "Z"))\n\n\nclass not_implemented: # noqa: N801\n """Mark some functionality as unsupported.\n\n Arguments:\n alias: optional name used instead of the data model hook [`__set_name__`].\n\n Returns:\n An exception-raising [descriptor].\n\n Notes:\n - Attribute/method name *doesn't* need to be declared twice\n - Allows different behavior when looked up on the class vs instance\n - Allows us to use `isinstance(...)` instead of monkeypatching an attribute to the function\n\n Examples:\n >>> from narwhals._utils import not_implemented\n >>> class Thing:\n ... def totally_ready(self) -> str:\n ... return "I'm ready!"\n ...\n ... 
not_ready_yet = not_implemented()\n >>>\n >>> thing = Thing()\n >>> thing.totally_ready()\n "I'm ready!"\n >>> thing.not_ready_yet()\n Traceback (most recent call last):\n ...\n NotImplementedError: 'not_ready_yet' is not implemented for: 'Thing'.\n ...\n >>> isinstance(Thing.not_ready_yet, not_implemented)\n True\n\n [`__set_name__`]: https://docs.python.org/3/reference/datamodel.html#object.__set_name__\n [descriptor]: https://docs.python.org/3/howto/descriptor.html\n """\n\n def __init__(self, alias: str | None = None, /) -> None:\n # NOTE: Don't like this\n # Trying to workaround `mypy` requiring `@property` everywhere\n self._alias: str | None = alias\n\n def __repr__(self) -> str:\n return f"<{type(self).__name__}>: {self._name_owner}.{self._name}"\n\n def __set_name__(self, owner: type[_T], name: str) -> None:\n # https://docs.python.org/3/howto/descriptor.html#customized-names\n self._name_owner: str = owner.__name__\n self._name: str = self._alias or name\n\n def __get__(\n self, instance: _T | Literal["raise"] | None, owner: type[_T] | None = None, /\n ) -> Any:\n if instance is None:\n # NOTE: Branch for `cls._name`\n # We can check that to see if an instance of `type(self)` for\n # https://narwhals-dev.github.io/narwhals/api-completeness/expr/\n return self\n # NOTE: Prefer not exposing the actual class we're defining in\n # `_implementation` may not be available everywhere\n who = getattr(instance, "_implementation", self._name_owner)\n _raise_not_implemented_error(self._name, who)\n return None # pragma: no cover\n\n def __call__(self, *args: Any, **kwds: Any) -> Any:\n # NOTE: Purely to duck-type as assignable to **any** instance method\n # Wouldn't be reachable through *regular* attribute access\n return self.__get__("raise")\n\n @classmethod\n def deprecated(cls, message: LiteralString, /) -> Self:\n """Alt constructor, wraps with `@deprecated`.\n\n Arguments:\n message: **Static-only** deprecation message, emitted in an IDE.\n\n Returns:\n An exception-raising [descriptor].\n\n [descriptor]: https://docs.python.org/3/howto/descriptor.html\n """\n obj = cls()\n return deprecated(message)(obj)\n\n\ndef _raise_not_implemented_error(what: str, who: str, /) -> NotImplementedError:\n msg = (\n f"{what!r} is not implemented for: {who!r}.\n\n"\n "If you would like to see this functionality in `narwhals`, "\n "please open an issue at: https://github.com/narwhals-dev/narwhals/issues"\n )\n raise NotImplementedError(msg)\n\n\nclass requires: # noqa: N801\n """Method decorator for raising under certain constraints.\n\n Attributes:\n _min_version: Minimum backend version.\n _hint: Optional suggested alternative.\n\n Examples:\n >>> from narwhals._utils import requires, Implementation\n >>> class SomeBackend:\n ... _implementation = Implementation.PYARROW\n ... _backend_version = 20, 0, 0\n ...\n ... @requires.backend_version((9000, 0, 0))\n ... def really_complex_feature(self) -> str:\n ... 
return "hello"\n >>> backend = SomeBackend()\n >>> backend.really_complex_feature()\n Traceback (most recent call last):\n ...\n NotImplementedError: `really_complex_feature` is only available in 'pyarrow>=9000.0.0', found version '20.0.0'.\n """\n\n _min_version: tuple[int, ...]\n _hint: str\n\n @classmethod\n def backend_version(cls, minimum: tuple[int, ...], /, hint: str = "") -> Self:\n """Method decorator for raising below a minimum `_backend_version`.\n\n Arguments:\n minimum: Minimum backend version.\n hint: Optional suggested alternative.\n\n Returns:\n An exception-raising decorator.\n """\n obj = cls.__new__(cls)\n obj._min_version = minimum\n obj._hint = hint\n return obj\n\n @staticmethod\n def _unparse_version(backend_version: tuple[int, ...], /) -> str:\n return ".".join(f"{d}" for d in backend_version)\n\n def _ensure_version(self, instance: _FullContext, /) -> None:\n if instance._backend_version >= self._min_version:\n return\n method = self._wrapped_name\n backend = instance._implementation\n minimum = self._unparse_version(self._min_version)\n found = self._unparse_version(instance._backend_version)\n msg = f"`{method}` is only available in '{backend}>={minimum}', found version {found!r}."\n if self._hint:\n msg = f"{msg}\n{self._hint}"\n raise NotImplementedError(msg)\n\n def __call__(self, fn: _Method[_ContextT, P, R], /) -> _Method[_ContextT, P, R]:\n self._wrapped_name = fn.__name__\n\n @wraps(fn)\n def wrapper(instance: _ContextT, *args: P.args, **kwds: P.kwargs) -> R:\n self._ensure_version(instance)\n return fn(instance, *args, **kwds)\n\n # NOTE: Only getting a complaint from `mypy`\n return wrapper # type: ignore[return-value]\n\n\ndef convert_str_slice_to_int_slice(\n str_slice: _SliceName, columns: Sequence[str]\n) -> tuple[int | None, int | None, Any]:\n start = columns.index(str_slice.start) if str_slice.start is not None else None\n stop = columns.index(str_slice.stop) + 1 if str_slice.stop is not None else None\n step = str_slice.step\n return (start, stop, step)\n\n\ndef inherit_doc(\n tp_parent: Callable[P, R1], /\n) -> Callable[[_Constructor[_T, P, R2]], _Constructor[_T, P, R2]]:\n """Steal the class-level docstring from parent and attach to child `__init__`.\n\n Returns:\n Decorated constructor.\n\n Notes:\n - Passes static typing (mostly)\n - Passes at runtime\n """\n\n def decorate(init_child: _Constructor[_T, P, R2], /) -> _Constructor[_T, P, R2]:\n if init_child.__name__ == "__init__" and issubclass(type(tp_parent), type):\n init_child.__doc__ = getdoc(tp_parent)\n return init_child\n else: # pragma: no cover\n msg = (\n f"`@{inherit_doc.__name__}` is only allowed to decorate an `__init__` with a class-level doc.\n"\n f"Method: {init_child.__qualname__!r}\n"\n f"Parent: {tp_parent!r}"\n )\n raise TypeError(msg)\n\n return decorate\n\n\ndef qualified_type_name(obj: object | type[Any], /) -> str:\n tp = obj if isinstance(obj, type) else type(obj)\n module = tp.__module__ if tp.__module__ != "builtins" else ""\n return f"{module}.{tp.__name__}".lstrip(".")\n\n\ndef ensure_type(obj: Any, /, *valid_types: type[Any], param_name: str = "") -> None:\n """Validate that an object is an instance of one or more specified types.\n\n Parameters:\n obj: The object to validate.\n *valid_types: One or more valid types that `obj` is expected to match.\n param_name: The name of the parameter being validated.\n Used to improve error message clarity.\n\n Raises:\n TypeError: If `obj` is not an instance of any of the provided `valid_types`.\n\n Examples:\n >>> from 
narwhals._utils import ensure_type\n >>> ensure_type(42, int, float)\n >>> ensure_type("hello", str)\n\n >>> ensure_type("hello", int, param_name="test")\n Traceback (most recent call last):\n ...\n TypeError: Expected 'int', got: 'str'\n test='hello'\n ^^^^^^^\n >>> import polars as pl\n >>> import pandas as pd\n >>> df = pl.DataFrame([[1], [2], [3], [4], [5]], schema=[*"abcde"])\n >>> ensure_type(df, pd.DataFrame, param_name="df")\n Traceback (most recent call last):\n ...\n TypeError: Expected 'pandas.core.frame.DataFrame', got: 'polars.dataframe.frame.DataFrame'\n df=polars.dataframe.frame.DataFrame(...)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n """\n if not isinstance(obj, valid_types): # pragma: no cover\n tp_names = " | ".join(qualified_type_name(tp) for tp in valid_types)\n msg = f"Expected {tp_names!r}, got: {qualified_type_name(obj)!r}"\n if param_name:\n left_pad = " " * 4\n val = repr(obj)\n if len(val) > 40: # truncate long reprs\n val = f"{qualified_type_name(obj)}(...)"\n assign = f"{left_pad}{param_name}="\n underline = (" " * len(assign)) + ("^" * len(val))\n msg = f"{msg}\n{assign}{val}\n{underline}"\n raise TypeError(msg)\n\n\nclass _DeferredIterable(Generic[_T]):\n """Store a callable producing an iterable to defer collection until we need it."""\n\n def __init__(self, into_iter: Callable[[], Iterable[_T]], /) -> None:\n self._into_iter: Callable[[], Iterable[_T]] = into_iter\n\n def __iter__(self) -> Iterator[_T]:\n yield from self._into_iter()\n\n def to_tuple(self) -> tuple[_T, ...]:\n # Collect and return as a `tuple`.\n it = self._into_iter()\n return it if isinstance(it, tuple) else tuple(it)\n
.venv\Lib\site-packages\narwhals\_utils.py
_utils.py
Python
66,589
0.75
0.147733
0.021199
node-utils
147
2024-02-15T21:45:11.881119
BSD-3-Clause
false
268d7111971ef1f0fe6aa8f1fe5d79ac
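To make the utilities in `_utils.py` above concrete, here is a minimal sketch of how a few of them behave. Note that `narwhals._utils` is a private module, so importing from it directly is an assumption made purely for illustration; these names may move between releases.

from narwhals._utils import (
    Implementation,
    generate_temporary_column_name,
    parse_version,
    scale_bytes,
)

# parse_version strips dev/pre-release suffixes before splitting on dots.
assert parse_version("1.2.3.dev0+618.gb552dc95c9") == (1, 2, 3)

# from_backend accepts a string, a module, or an Implementation member.
impl = Implementation.from_backend("pandas")
assert impl.is_pandas() and impl.is_pandas_like()

# Tokens are prefixed with "nw" so they always start with a letter.
cols = ["abc", "xyz"]
assert generate_temporary_column_name(n_bytes=8, columns=cols) not in cols

# scale_bytes uses binary (1024-based) units.
assert scale_bytes(1024**2, "mb") == 1.0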
from __future__ import annotations\n\nimport typing as _t\n\nfrom narwhals import dependencies, dtypes, exceptions, selectors\nfrom narwhals._utils import (\n Implementation,\n generate_temporary_column_name,\n is_ordered_categorical,\n maybe_align_index,\n maybe_convert_dtypes,\n maybe_get_index,\n maybe_reset_index,\n maybe_set_index,\n)\nfrom narwhals.dataframe import DataFrame, LazyFrame\nfrom narwhals.dtypes import (\n Array,\n Binary,\n Boolean,\n Categorical,\n Date,\n Datetime,\n Decimal,\n Duration,\n Enum,\n Field,\n Float32,\n Float64,\n Int8,\n Int16,\n Int32,\n Int64,\n Int128,\n List,\n Object,\n String,\n Struct,\n Time,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n UInt128,\n Unknown,\n)\nfrom narwhals.expr import Expr\nfrom narwhals.functions import (\n all_ as all,\n all_horizontal,\n any_horizontal,\n col,\n concat,\n concat_str,\n exclude,\n from_arrow,\n from_dict,\n from_numpy,\n get_level,\n len_ as len,\n lit,\n max,\n max_horizontal,\n mean,\n mean_horizontal,\n median,\n min,\n min_horizontal,\n new_series,\n nth,\n read_csv,\n read_parquet,\n scan_csv,\n scan_parquet,\n show_versions,\n sum,\n sum_horizontal,\n when,\n)\nfrom narwhals.schema import Schema\nfrom narwhals.series import Series\nfrom narwhals.translate import (\n from_native,\n get_native_namespace,\n narwhalify,\n to_native,\n to_py_scalar,\n)\n\n__version__: str\n\n__all__ = [\n "Array",\n "Binary",\n "Boolean",\n "Categorical",\n "DataFrame",\n "Date",\n "Datetime",\n "Decimal",\n "Duration",\n "Enum",\n "Expr",\n "Field",\n "Float32",\n "Float64",\n "Implementation",\n "Int8",\n "Int16",\n "Int32",\n "Int64",\n "Int128",\n "LazyFrame",\n "List",\n "Object",\n "Schema",\n "Series",\n "String",\n "Struct",\n "Time",\n "UInt8",\n "UInt16",\n "UInt32",\n "UInt64",\n "UInt128",\n "Unknown",\n "all",\n "all_horizontal",\n "any_horizontal",\n "col",\n "concat",\n "concat_str",\n "dependencies",\n "dtypes",\n "exceptions",\n "exclude",\n "from_arrow",\n "from_dict",\n "from_native",\n "from_numpy",\n "generate_temporary_column_name",\n "get_level",\n "get_native_namespace",\n "is_ordered_categorical",\n "len",\n "lit",\n "max",\n "max_horizontal",\n "maybe_align_index",\n "maybe_convert_dtypes",\n "maybe_get_index",\n "maybe_reset_index",\n "maybe_set_index",\n "mean",\n "mean_horizontal",\n "median",\n "min",\n "min_horizontal",\n "narwhalify",\n "new_series",\n "nth",\n "read_csv",\n "read_parquet",\n "scan_csv",\n "scan_parquet",\n "selectors",\n "show_versions",\n "sum",\n "sum_horizontal",\n "to_native",\n "to_py_scalar",\n "when",\n]\n\n\ndef __getattr__(name: _t.Literal["__version__"]) -> str: # type: ignore[misc]\n if name == "__version__":\n global __version__ # noqa: PLW0603\n\n from importlib import metadata\n\n __version__ = metadata.version(__name__)\n return __version__\n else:\n msg = f"module {__name__!r} has no attribute {name!r}"\n raise AttributeError(msg)\n
.venv\Lib\site-packages\narwhals\__init__.py
__init__.py
Python
3,226
0.95
0.010753
0
vue-tools
167
2024-06-26T23:28:38.511615
BSD-3-Clause
false
5b9f639c4eca2ac9f9c5360c90256012
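For orientation, a short sketch of the public API that the `__init__.py` above re-exports, using pandas only as an example backend:

import pandas as pd

import narwhals as nw

# Wrap a native frame, express the logic once, unwrap at the end.
df = nw.from_native(pd.DataFrame({"a": [1, 2, 3]}), eager_only=True)
result = df.with_columns(b=nw.col("a") + 1).to_native()
print(result)  # a pandas DataFrame with columns 'a' and 'b'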
from __future__ import annotations\n\nfrom narwhals.stable import v1\n\n__all__ = ["v1"]\n
.venv\Lib\site-packages\narwhals\stable\__init__.py
__init__.py
Python
85
0.65
0
0
python-kit
612
2025-06-19T20:29:15.704458
BSD-3-Clause
false
43c42e405544f93ba00325cd05466eb1
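The module above is a thin re-export; the intended import pattern for the stable API is simply:

import narwhals.stable.v1 as nw

# The stable namespace mirrors the top-level API, with backwards-compatibility guarantees.
expr = nw.col("a") + 1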
from __future__ import annotations\n\nimport sys\nfrom typing import TYPE_CHECKING, Any\n\nif TYPE_CHECKING:\n import cudf\n import dask.dataframe as dd\n import ibis\n import modin.pandas as mpd\n import pandas as pd\n import polars as pl\n import pyarrow as pa\n from typing_extensions import TypeIs\n\n\nfrom narwhals.dependencies import (\n IMPORT_HOOKS,\n get_cudf,\n get_dask_dataframe,\n get_ibis,\n get_modin,\n get_numpy,\n get_pandas,\n get_polars,\n get_pyarrow,\n is_into_dataframe,\n is_into_series,\n is_narwhals_dataframe,\n is_narwhals_lazyframe,\n is_narwhals_series,\n is_numpy_array,\n is_pandas_index,\n)\n\n\ndef is_pandas_dataframe(df: Any) -> TypeIs[pd.DataFrame]:\n """Check whether `df` is a pandas DataFrame without importing pandas."""\n return ((pd := get_pandas()) is not None and isinstance(df, pd.DataFrame)) or any(\n (mod := sys.modules.get(module_name, None)) is not None\n and isinstance(df, mod.pandas.DataFrame)\n for module_name in IMPORT_HOOKS\n )\n\n\ndef is_pandas_series(ser: Any) -> TypeIs[pd.Series[Any]]:\n """Check whether `ser` is a pandas Series without importing pandas."""\n return ((pd := get_pandas()) is not None and isinstance(ser, pd.Series)) or any(\n (mod := sys.modules.get(module_name, None)) is not None\n and isinstance(ser, mod.pandas.Series)\n for module_name in IMPORT_HOOKS\n )\n\n\ndef is_modin_dataframe(df: Any) -> TypeIs[mpd.DataFrame]:\n """Check whether `df` is a modin DataFrame without importing modin."""\n return (mpd := get_modin()) is not None and isinstance(df, mpd.DataFrame)\n\n\ndef is_modin_series(ser: Any) -> TypeIs[mpd.Series]:\n """Check whether `ser` is a modin Series without importing modin."""\n return (mpd := get_modin()) is not None and isinstance(ser, mpd.Series)\n\n\ndef is_cudf_dataframe(df: Any) -> TypeIs[cudf.DataFrame]:\n """Check whether `df` is a cudf DataFrame without importing cudf."""\n return (cudf := get_cudf()) is not None and isinstance(df, cudf.DataFrame)\n\n\ndef is_cudf_series(ser: Any) -> TypeIs[cudf.Series[Any]]:\n """Check whether `ser` is a cudf Series without importing cudf."""\n return (cudf := get_cudf()) is not None and isinstance(ser, cudf.Series)\n\n\ndef is_dask_dataframe(df: Any) -> TypeIs[dd.DataFrame]:\n """Check whether `df` is a Dask DataFrame without importing Dask."""\n return (dd := get_dask_dataframe()) is not None and isinstance(df, dd.DataFrame)\n\n\ndef is_ibis_table(df: Any) -> TypeIs[ibis.Table]:\n """Check whether `df` is an Ibis Table without importing Ibis."""\n return (ibis := get_ibis()) is not None and isinstance(df, ibis.expr.types.Table)\n\n\ndef is_polars_dataframe(df: Any) -> TypeIs[pl.DataFrame]:\n """Check whether `df` is a Polars DataFrame without importing Polars."""\n return (pl := get_polars()) is not None and isinstance(df, pl.DataFrame)\n\n\ndef is_polars_lazyframe(df: Any) -> TypeIs[pl.LazyFrame]:\n """Check whether `df` is a Polars LazyFrame without importing Polars."""\n return (pl := get_polars()) is not None and isinstance(df, pl.LazyFrame)\n\n\ndef is_polars_series(ser: Any) -> TypeIs[pl.Series]:\n """Check whether `ser` is a Polars Series without importing Polars."""\n return (pl := get_polars()) is not None and isinstance(ser, pl.Series)\n\n\ndef is_pyarrow_chunked_array(ser: Any) -> TypeIs[pa.ChunkedArray[Any]]:\n """Check whether `ser` is a PyArrow ChunkedArray without importing PyArrow."""\n return (pa := get_pyarrow()) is not None and isinstance(ser, pa.ChunkedArray)\n\n\ndef is_pyarrow_table(df: Any) -> TypeIs[pa.Table]:\n """Check whether `df` is a 
PyArrow Table without importing PyArrow."""\n return (pa := get_pyarrow()) is not None and isinstance(df, pa.Table)\n\n\ndef is_pandas_like_dataframe(df: Any) -> bool:\n """Check whether `df` is a pandas-like DataFrame without doing any imports.\n\n By "pandas-like", we mean: pandas, Modin, cuDF.\n """\n return is_pandas_dataframe(df) or is_modin_dataframe(df) or is_cudf_dataframe(df)\n\n\ndef is_pandas_like_series(ser: Any) -> bool:\n """Check whether `ser` is a pandas-like Series without doing any imports.\n\n By "pandas-like", we mean: pandas, Modin, cuDF.\n """\n return is_pandas_series(ser) or is_modin_series(ser) or is_cudf_series(ser)\n\n\n__all__ = [\n "get_cudf",\n "get_ibis",\n "get_modin",\n "get_numpy",\n "get_pandas",\n "get_polars",\n "get_pyarrow",\n "is_cudf_dataframe",\n "is_cudf_series",\n "is_dask_dataframe",\n "is_ibis_table",\n "is_into_dataframe",\n "is_into_series",\n "is_modin_dataframe",\n "is_modin_series",\n "is_narwhals_dataframe",\n "is_narwhals_lazyframe",\n "is_narwhals_series",\n "is_numpy_array",\n "is_pandas_dataframe",\n "is_pandas_index",\n "is_pandas_like_dataframe",\n "is_pandas_like_series",\n "is_pandas_series",\n "is_polars_dataframe",\n "is_polars_lazyframe",\n "is_polars_series",\n "is_pyarrow_chunked_array",\n "is_pyarrow_table",\n]\n
.venv\Lib\site-packages\narwhals\stable\v1\dependencies.py
dependencies.py
Python
5,001
0.85
0.115385
0
vue-tools
692
2024-11-12T21:45:44.348970
Apache-2.0
false
1e3cc5eedeff52da165919d96ebcbcb0
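A sketch of how the type guards above are typically used; `describe_backend` is a hypothetical helper, not part of narwhals:

from typing import Any

from narwhals.stable.v1.dependencies import is_pandas_dataframe, is_polars_dataframe


def describe_backend(df: Any) -> str:  # hypothetical helper for illustration
    # Each guard returns False cheaply when the backend isn't installed,
    # and narrows the type for static checkers when it is.
    if is_pandas_dataframe(df):
        return "pandas"
    if is_polars_dataframe(df):
        return "polars"
    return "unknown"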
from __future__ import annotations\n\nfrom narwhals.stable.v1._dtypes import (\n Array,\n Binary,\n Boolean,\n Categorical,\n Date,\n Datetime,\n Decimal,\n DType,\n Duration,\n Enum,\n Field,\n Float32,\n Float64,\n FloatType,\n Int8,\n Int16,\n Int32,\n Int64,\n Int128,\n IntegerType,\n List,\n NestedType,\n NumericType,\n Object,\n SignedIntegerType,\n String,\n Struct,\n Time,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n UInt128,\n Unknown,\n UnsignedIntegerType,\n)\n\n__all__ = [\n "Array",\n "Binary",\n "Boolean",\n "Categorical",\n "DType",\n "Date",\n "Datetime",\n "Decimal",\n "Duration",\n "Enum",\n "Field",\n "Float32",\n "Float64",\n "FloatType",\n "Int8",\n "Int16",\n "Int32",\n "Int64",\n "Int128",\n "IntegerType",\n "List",\n "NestedType",\n "NumericType",\n "Object",\n "SignedIntegerType",\n "String",\n "Struct",\n "Time",\n "UInt8",\n "UInt16",\n "UInt32",\n "UInt64",\n "UInt128",\n "Unknown",\n "UnsignedIntegerType",\n]\n
.venv\Lib\site-packages\narwhals\stable\v1\dtypes.py
dtypes.py
Python
1,082
0.85
0
0
node-utils
271
2023-12-17T19:29:32.393645
Apache-2.0
false
66989ce0e5e2ab7790761770209f972b
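A sketch of how the re-exported dtypes are used in schema checks; narwhals dtypes compare equal both to instances and to the classes themselves (Polars is used here only as an example backend):

import polars as pl

import narwhals.stable.v1 as nw

df = nw.from_native(pl.DataFrame({"a": [1, 2], "b": ["x", "y"]}), eager_only=True)
schema = df.schema

# A dtype instance compares equal to its class, so both spellings work.
assert schema["a"] == nw.Int64
assert isinstance(schema["b"], nw.String)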
from __future__ import annotations\n\nfrom narwhals.selectors import (\n all,\n boolean,\n by_dtype,\n categorical,\n datetime,\n matches,\n numeric,\n string,\n)\n\n__all__ = [\n "all",\n "boolean",\n "by_dtype",\n "categorical",\n "datetime",\n "matches",\n "numeric",\n "string",\n]\n
.venv\Lib\site-packages\narwhals\stable\v1\selectors.py
selectors.py
Python
312
0.85
0
0
python-kit
7
2024-03-24T19:16:21.474101
Apache-2.0
false
fe4093548be83c4d5a705f16424e7ae8
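A sketch of the selector API re-exported above, again with pandas as an arbitrary example backend:

import pandas as pd

import narwhals.stable.v1 as nw
from narwhals.stable.v1 import selectors as ncs

df = nw.from_native(
    pd.DataFrame({"a": [1, 2], "b": ["x", "y"], "c": [1.5, 2.5]}), eager_only=True
)

# Select all numeric columns without naming them explicitly.
assert df.select(ncs.numeric()).columns == ["a", "c"]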
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Protocol, TypeVar, Union\n\nif TYPE_CHECKING:\n import sys\n\n from narwhals.stable.v1 import DataFrame, LazyFrame\n\n if sys.version_info >= (3, 10):\n from typing import TypeAlias\n else:\n from typing_extensions import TypeAlias\n\n from narwhals.stable.v1 import Expr, Series, dtypes\n\n # All dataframes supported by Narwhals have a\n # `columns` property. Their similarities don't extend\n # _that_ much further unfortunately...\n class NativeFrame(Protocol):\n @property\n def columns(self) -> Any: ...\n\n def join(self, *args: Any, **kwargs: Any) -> Any: ...\n\n class NativeSeries(Protocol):\n def __len__(self) -> int: ...\n\n class DataFrameLike(Protocol):\n def __dataframe__(self, *args: Any, **kwargs: Any) -> Any: ...\n\n\nIntoExpr: TypeAlias = Union["Expr", str, "Series[Any]"]\n"""Anything which can be converted to an expression.\n\nUse this to mean "either a Narwhals expression, or something\nwhich can be converted into one". For example, `exprs` in `DataFrame.select` is\ntyped to accept `IntoExpr`, as it can either accept a `nw.Expr`\n(e.g. `df.select(nw.col('a'))`) or a string which will be interpreted as a\n`nw.Expr`, e.g. `df.select('a')`.\n"""\n\nIntoDataFrame: TypeAlias = Union["NativeFrame", "DataFrame[Any]", "DataFrameLike"]\n"""Anything which can be converted to a Narwhals DataFrame.\n\nUse this if your function accepts a narwhalifiable object but doesn't care about its backend.\n\nExamples:\n >>> import narwhals as nw\n >>> from narwhals.typing import IntoDataFrame\n >>> def agnostic_shape(df_native: IntoDataFrame) -> tuple[int, int]:\n ... df = nw.from_native(df_native, eager_only=True)\n ... return df.shape\n"""\n\nIntoFrame: TypeAlias = Union[\n "NativeFrame", "DataFrame[Any]", "LazyFrame[Any]", "DataFrameLike"\n]\n"""Anything which can be converted to a Narwhals DataFrame or LazyFrame.\n\nUse this if your function can accept an object which can be converted to either\n`nw.DataFrame` or `nw.LazyFrame` and it doesn't care about its backend.\n\nExamples:\n >>> import narwhals as nw\n >>> from narwhals.typing import IntoFrame\n >>> def agnostic_columns(df_native: IntoFrame) -> list[str]:\n ... df = nw.from_native(df_native)\n ... return df.collect_schema().names()\n"""\n\nFrame: TypeAlias = Union["DataFrame[Any]", "LazyFrame[Any]"]\n"""Narwhals DataFrame or Narwhals LazyFrame.\n\nUse this if your function can work with either and your function doesn't care\nabout its backend.\n\nExamples:\n >>> import narwhals as nw\n >>> from narwhals.typing import Frame\n >>> @nw.narwhalify\n ... def agnostic_columns(df: Frame) -> list[str]:\n ... return df.columns\n"""\n\nIntoSeries: TypeAlias = Union["Series[Any]", "NativeSeries"]\n"""Anything which can be converted to a Narwhals Series.\n\nUse this if your function can accept an object which can be converted to `nw.Series`\nand it doesn't care about its backend.\n\nExamples:\n >>> from typing import Any\n >>> import narwhals as nw\n >>> from narwhals.typing import IntoSeries\n >>> def agnostic_to_list(s_native: IntoSeries) -> list[Any]:\n ... s = nw.from_native(s_native)\n ... 
return s.to_list()\n"""\n\nIntoFrameT = TypeVar("IntoFrameT", bound="IntoFrame")\n"""TypeVar bound to object convertible to Narwhals DataFrame or Narwhals LazyFrame.\n\nUse this if your function accepts an object which is convertible to `nw.DataFrame`\nor `nw.LazyFrame` and returns an object of the same type.\n\nExamples:\n >>> import narwhals as nw\n >>> from narwhals.typing import IntoFrameT\n >>> def agnostic_func(df_native: IntoFrameT) -> IntoFrameT:\n ... df = nw.from_native(df_native)\n ... return df.with_columns(c=nw.col("a") + 1).to_native()\n"""\n\nIntoDataFrameT = TypeVar("IntoDataFrameT", bound="IntoDataFrame")\n"""TypeVar bound to object convertible to Narwhals DataFrame.\n\nUse this if your function accepts an object which can be converted to `nw.DataFrame`\nand returns an object of the same class.\n\nExamples:\n >>> import narwhals as nw\n >>> from narwhals.typing import IntoDataFrameT\n >>> def agnostic_func(df_native: IntoDataFrameT) -> IntoDataFrameT:\n ... df = nw.from_native(df_native, eager_only=True)\n ... return df.with_columns(c=df["a"] + 1).to_native()\n"""\n\nFrameT = TypeVar("FrameT", "DataFrame[Any]", "LazyFrame[Any]")\n"""TypeVar bound to Narwhals DataFrame or Narwhals LazyFrame.\n\nUse this if your function accepts either `nw.DataFrame` or `nw.LazyFrame` and returns\nan object of the same kind.\n\nExamples:\n >>> import narwhals as nw\n >>> from narwhals.typing import FrameT\n >>> @nw.narwhalify\n ... def agnostic_func(df: FrameT) -> FrameT:\n ... return df.with_columns(c=nw.col("a") + 1)\n"""\n\nDataFrameT = TypeVar("DataFrameT", bound="DataFrame[Any]")\n"""TypeVar bound to Narwhals DataFrame.\n\nUse this if your function can accept a Narwhals DataFrame and returns a Narwhals\nDataFrame backed by the same backend.\n\nExamples:\n >>> import narwhals as nw\n >>> from narwhals.typing import DataFrameT\n >>> @nw.narwhalify\n ... def func(df: DataFrameT) -> DataFrameT:\n ... return df.with_columns(c=df["a"] + 1)\n"""\n\nIntoSeriesT = TypeVar("IntoSeriesT", bound="IntoSeries")\n"""TypeVar bound to object convertible to Narwhals Series.\n\nUse this if your function accepts an object which can be converted to `nw.Series`\nand returns an object of the same class.\n\nExamples:\n >>> import narwhals as nw\n >>> from narwhals.typing import IntoSeriesT\n >>> def agnostic_abs(s_native: IntoSeriesT) -> IntoSeriesT:\n ... s = nw.from_native(s_native, series_only=True)\n ... return s.abs().to_native()\n"""\n\n\nclass DTypes:\n Int64: type[dtypes.Int64]\n Int32: type[dtypes.Int32]\n Int16: type[dtypes.Int16]\n Int8: type[dtypes.Int8]\n UInt64: type[dtypes.UInt64]\n UInt32: type[dtypes.UInt32]\n UInt16: type[dtypes.UInt16]\n UInt8: type[dtypes.UInt8]\n Float64: type[dtypes.Float64]\n Float32: type[dtypes.Float32]\n String: type[dtypes.String]\n Boolean: type[dtypes.Boolean]\n Object: type[dtypes.Object]\n Categorical: type[dtypes.Categorical]\n Enum: type[dtypes.Enum]\n Datetime: type[dtypes.Datetime]\n Duration: type[dtypes.Duration]\n Date: type[dtypes.Date]\n Field: type[dtypes.Field]\n Struct: type[dtypes.Struct]\n List: type[dtypes.List]\n Array: type[dtypes.Array]\n Unknown: type[dtypes.Unknown]\n\n\n__all__ = [\n "DataFrameT",\n "Frame",\n "FrameT",\n "IntoDataFrame",\n "IntoDataFrameT",\n "IntoExpr",\n "IntoFrame",\n "IntoFrameT",\n "IntoSeries",\n "IntoSeriesT",\n]\n
.venv\Lib\site-packages\narwhals\stable\v1\typing.py
typing.py
Python
6,896
0.95
0.191388
0.018072
node-utils
657
2025-02-27T17:41:07.167363
Apache-2.0
false
51fa6f64b0f2dffa2925febabf35530d
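A sketch of the `IntoDataFrameT` pattern documented in the record above, assuming pandas is installed; `add_one` and the sample data are illustrative.

import pandas as pd

import narwhals.stable.v1 as nw
from narwhals.stable.v1.typing import IntoDataFrameT


def add_one(df_native: IntoDataFrameT) -> IntoDataFrameT:
    # from_native wraps the native input; to_native unwraps it again, so the
    # annotated return type matches whatever backend the caller passed in.
    df = nw.from_native(df_native, eager_only=True)
    return df.with_columns(c=nw.col("a") + 1).to_native()


out = add_one(pd.DataFrame({"a": [1, 2, 3]}))
print(type(out))  # <class 'pandas.core.frame.DataFrame'>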
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._utils import inherit_doc\nfrom narwhals.dtypes import (\n Array,\n Binary,\n Boolean,\n Categorical,\n Date,\n Datetime as NwDatetime,\n Decimal,\n DType,\n Duration as NwDuration,\n Enum as NwEnum,\n Field,\n Float32,\n Float64,\n FloatType,\n Int8,\n Int16,\n Int32,\n Int64,\n Int128,\n IntegerType,\n List,\n NestedType,\n NumericType,\n Object,\n SignedIntegerType,\n String,\n Struct,\n Time,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n UInt128,\n Unknown,\n UnsignedIntegerType,\n)\n\nif TYPE_CHECKING:\n from datetime import timezone\n\n from narwhals.typing import TimeUnit\n\n\nclass Datetime(NwDatetime):\n @inherit_doc(NwDatetime)\n def __init__(\n self, time_unit: TimeUnit = "us", time_zone: str | timezone | None = None\n ) -> None:\n super().__init__(time_unit, time_zone)\n\n def __hash__(self) -> int:\n return hash(self.__class__)\n\n\nclass Duration(NwDuration):\n @inherit_doc(NwDuration)\n def __init__(self, time_unit: TimeUnit = "us") -> None:\n super().__init__(time_unit)\n\n def __hash__(self) -> int:\n return hash(self.__class__)\n\n\nclass Enum(NwEnum):\n """A fixed categorical encoding of a unique set of strings.\n\n Polars has an Enum data type, while pandas and PyArrow do not.\n\n Examples:\n >>> import polars as pl\n >>> import narwhals.stable.v1 as nw\n >>> data = ["beluga", "narwhal", "orca"]\n >>> s_native = pl.Series(data, dtype=pl.Enum(data))\n >>> nw.from_native(s_native, series_only=True).dtype\n Enum\n """\n\n def __init__(self) -> None:\n super(NwEnum, self).__init__()\n\n def __eq__(self, other: DType | type[DType]) -> bool: # type: ignore[override]\n if type(other) is type:\n return other in {type(self), NwEnum}\n return isinstance(other, type(self))\n\n def __hash__(self) -> int: # pragma: no cover\n return super(NwEnum, self).__hash__()\n\n def __repr__(self) -> str: # pragma: no cover\n return super(NwEnum, self).__repr__()\n\n\n__all__ = [\n "Array",\n "Binary",\n "Boolean",\n "Categorical",\n "DType",\n "Date",\n "Datetime",\n "Decimal",\n "Duration",\n "Enum",\n "Field",\n "Float32",\n "Float64",\n "FloatType",\n "Int8",\n "Int16",\n "Int32",\n "Int64",\n "Int128",\n "IntegerType",\n "List",\n "NestedType",\n "NumericType",\n "Object",\n "SignedIntegerType",\n "String",\n "Struct",\n "Time",\n "UInt8",\n "UInt16",\n "UInt32",\n "UInt64",\n "UInt128",\n "Unknown",\n "UnsignedIntegerType",\n]\n
.venv\Lib\site-packages\narwhals\stable\v1\_dtypes.py
_dtypes.py
Python
2,700
0.95
0.103704
0
node-utils
769
2024-02-26T03:31:49.075981
MIT
false
6c2066551925542577d34487564052f8
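A short sketch of the v1 `Enum` comparison semantics defined in the record above, assuming polars is installed; the category values are illustrative.

import polars as pl

import narwhals.stable.v1 as nw

data = ["beluga", "narwhal", "orca"]
s_native = pl.Series(data, dtype=pl.Enum(data))
dtype = nw.from_native(s_native, series_only=True).dtype

# The overridden __eq__ accepts both the v1 class and the base Enum class,
# while comparison against an unrelated dtype class is False.
print(dtype == nw.Enum)      # True
print(dtype == nw.Datetime)  # False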
from __future__ import annotations\n\nfrom narwhals._compliant.typing import CompliantNamespaceT_co\nfrom narwhals._namespace import Namespace as NwNamespace\nfrom narwhals._utils import Version\n\n__all__ = ["Namespace"]\n\n\nclass Namespace(NwNamespace[CompliantNamespaceT_co], version=Version.V1): ...\n
.venv\Lib\site-packages\narwhals\stable\v1\_namespace.py
_namespace.py
Python
296
0.85
0.1
0
react-lib
857
2023-09-20T03:45:39.019482
GPL-3.0
false
8d522a621f933df053aae5cca3f539be
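The class statement above passes `version=Version.V1` as a class keyword. A generic sketch of one mechanism that consumes such keywords, Python's `__init_subclass__` hook, is below; `Base` and `tag` are illustrative stand-ins, and whether `NwNamespace` uses this hook or a metaclass is not shown by this record.

class Base:
    _tag: str = "default"

    def __init_subclass__(cls, *, tag: str = "default", **kwargs: object) -> None:
        # Keyword arguments written on the class statement arrive here, so the
        # base class can record per-subclass configuration at definition time.
        super().__init_subclass__(**kwargs)
        cls._tag = tag


class V1(Base, tag="v1"): ...


print(V1._tag)  # prints: v1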
from __future__ import annotations\n\nfrom functools import wraps\nfrom typing import TYPE_CHECKING, Any, Callable, Literal, cast, overload\nfrom warnings import warn\n\nimport narwhals as nw\nfrom narwhals import exceptions, functions as nw_f\nfrom narwhals._typing_compat import TypeVar, assert_never\nfrom narwhals._utils import (\n Implementation,\n Version,\n deprecate_native_namespace,\n find_stacklevel,\n generate_temporary_column_name,\n inherit_doc,\n is_ordered_categorical,\n maybe_align_index,\n maybe_convert_dtypes,\n maybe_get_index,\n maybe_reset_index,\n maybe_set_index,\n validate_strict_and_pass_though,\n)\nfrom narwhals.dataframe import DataFrame as NwDataFrame, LazyFrame as NwLazyFrame\nfrom narwhals.dependencies import get_polars\nfrom narwhals.exceptions import InvalidIntoExprError\nfrom narwhals.expr import Expr as NwExpr\nfrom narwhals.functions import _new_series_impl, concat, show_versions\nfrom narwhals.schema import Schema as NwSchema\nfrom narwhals.series import Series as NwSeries\nfrom narwhals.stable.v1 import dependencies, dtypes, selectors\nfrom narwhals.stable.v1.dtypes import (\n Array,\n Binary,\n Boolean,\n Categorical,\n Date,\n Datetime,\n Decimal,\n Duration,\n Enum,\n Field,\n Float32,\n Float64,\n Int8,\n Int16,\n Int32,\n Int64,\n Int128,\n List,\n Object,\n String,\n Struct,\n Time,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n UInt128,\n Unknown,\n)\nfrom narwhals.translate import _from_native_impl, get_native_namespace, to_py_scalar\nfrom narwhals.typing import IntoDataFrameT, IntoFrameT\n\nif TYPE_CHECKING:\n from collections.abc import Iterable, Mapping, Sequence\n from types import ModuleType\n\n from typing_extensions import ParamSpec, Self\n\n from narwhals._translate import IntoArrowTable\n from narwhals.dataframe import MultiColSelector, MultiIndexSelector\n from narwhals.dtypes import DType\n from narwhals.typing import (\n IntoDType,\n IntoExpr,\n IntoFrame,\n IntoLazyFrameT,\n IntoSeries,\n NonNestedLiteral,\n SingleColSelector,\n SingleIndexSelector,\n _1DArray,\n _2DArray,\n )\n\n DataFrameT = TypeVar("DataFrameT", bound="DataFrame[Any]")\n LazyFrameT = TypeVar("LazyFrameT", bound="LazyFrame[Any]")\n SeriesT = TypeVar("SeriesT", bound="Series[Any]")\n T = TypeVar("T", default=Any)\n P = ParamSpec("P")\n R = TypeVar("R")\n\nIntoSeriesT = TypeVar("IntoSeriesT", bound="IntoSeries", default=Any)\n\n\nclass DataFrame(NwDataFrame[IntoDataFrameT]):\n @inherit_doc(NwDataFrame)\n def __init__(self, df: Any, *, level: Literal["full", "lazy", "interchange"]) -> None:\n assert df._version is Version.V1 # noqa: S101\n super().__init__(df, level=level)\n\n # We need to override any method which don't return Self so that type\n # annotations are correct.\n\n @property\n def _series(self) -> type[Series[Any]]:\n return cast("type[Series[Any]]", Series)\n\n @property\n def _lazyframe(self) -> type[LazyFrame[Any]]:\n return cast("type[LazyFrame[Any]]", LazyFrame)\n\n @overload\n def __getitem__(self, item: tuple[SingleIndexSelector, SingleColSelector]) -> Any: ...\n\n @overload\n def __getitem__( # type: ignore[overload-overlap]\n self, item: str | tuple[MultiIndexSelector, SingleColSelector]\n ) -> Series[Any]: ...\n\n @overload\n def __getitem__(\n self,\n item: (\n SingleIndexSelector\n | MultiIndexSelector\n | MultiColSelector\n | tuple[SingleIndexSelector, MultiColSelector]\n | tuple[MultiIndexSelector, MultiColSelector]\n ),\n ) -> Self: ...\n def __getitem__(\n self,\n item: (\n SingleIndexSelector\n | SingleColSelector\n | MultiColSelector\n | 
MultiIndexSelector\n | tuple[SingleIndexSelector, SingleColSelector]\n | tuple[SingleIndexSelector, MultiColSelector]\n | tuple[MultiIndexSelector, SingleColSelector]\n | tuple[MultiIndexSelector, MultiColSelector]\n ),\n ) -> Series[Any] | Self | Any:\n return super().__getitem__(item)\n\n def lazy(\n self, backend: ModuleType | Implementation | str | None = None\n ) -> LazyFrame[Any]:\n return _stableify(super().lazy(backend=backend))\n\n @overload # type: ignore[override]\n def to_dict(self, *, as_series: Literal[True] = ...) -> dict[str, Series[Any]]: ...\n @overload\n def to_dict(self, *, as_series: Literal[False]) -> dict[str, list[Any]]: ...\n @overload\n def to_dict(\n self, *, as_series: bool\n ) -> dict[str, Series[Any]] | dict[str, list[Any]]: ...\n def to_dict(\n self, *, as_series: bool = True\n ) -> dict[str, Series[Any]] | dict[str, list[Any]]:\n # Type checkers complain that `nw.Series` is not assignable to `nw.v1.stable.Series`.\n # However the return type actually is `nw.v1.stable.Series`, check `tests/v1_test.py::test_to_dict_as_series`.\n return super().to_dict(as_series=as_series) # type: ignore[return-value]\n\n def is_duplicated(self) -> Series[Any]:\n return _stableify(super().is_duplicated())\n\n def is_unique(self) -> Series[Any]:\n return _stableify(super().is_unique())\n\n def _l1_norm(self) -> Self:\n """Private, just used to test the stable API.\n\n Returns:\n A new DataFrame.\n """\n return self.select(all()._l1_norm())\n\n\nclass LazyFrame(NwLazyFrame[IntoFrameT]):\n @inherit_doc(NwLazyFrame)\n def __init__(self, df: Any, *, level: Literal["full", "lazy", "interchange"]) -> None:\n assert df._version is Version.V1 # noqa: S101\n super().__init__(df, level=level)\n\n @property\n def _dataframe(self) -> type[DataFrame[Any]]:\n return DataFrame\n\n def _extract_compliant(self, arg: Any) -> Any:\n # After v1, we raise when passing order-dependent or length-changing\n # expressions to LazyFrame\n from narwhals.dataframe import BaseFrame\n from narwhals.expr import Expr\n from narwhals.series import Series\n\n if isinstance(arg, BaseFrame):\n return arg._compliant_frame\n if isinstance(arg, Series): # pragma: no cover\n msg = "Mixing Series with LazyFrame is not supported."\n raise TypeError(msg)\n if isinstance(arg, Expr):\n # After stable.v1, we raise for order-dependent exprs or filtrations\n return arg._to_compliant_expr(self.__narwhals_namespace__())\n if isinstance(arg, str):\n plx = self.__narwhals_namespace__()\n return plx.col(arg)\n if get_polars() is not None and "polars" in str(type(arg)): # pragma: no cover\n msg = (\n f"Expected Narwhals object, got: {type(arg)}.\n\n"\n "Perhaps you:\n"\n "- Forgot a `nw.from_native` somewhere?\n"\n "- Used `pl.col` instead of `nw.col`?"\n )\n raise TypeError(msg)\n raise InvalidIntoExprError.from_invalid_type(type(arg))\n\n def collect(\n self, backend: ModuleType | Implementation | str | None = None, **kwargs: Any\n ) -> DataFrame[Any]:\n return _stableify(super().collect(backend=backend, **kwargs))\n\n def _l1_norm(self) -> Self:\n """Private, just used to test the stable API.\n\n Returns:\n A new lazyframe.\n """\n return self.select(all()._l1_norm())\n\n def tail(self, n: int = 5) -> Self:\n r"""Get the last `n` rows.\n\n Arguments:\n n: Number of rows to return.\n\n Returns:\n A subset of the LazyFrame of shape (n, n_columns).\n """\n return super().tail(n)\n\n def gather_every(self, n: int, offset: int = 0) -> Self:\n r"""Take every nth row in the DataFrame and return as a new DataFrame.\n\n Arguments:\n n: 
Gather every *n*-th row.\n offset: Starting index.\n\n Returns:\n The LazyFrame containing only the selected rows.\n """\n return self._with_compliant(\n self._compliant_frame.gather_every(n=n, offset=offset)\n )\n\n def with_row_index(\n self, name: str = "index", *, order_by: str | Sequence[str] | None = None\n ) -> Self:\n """Insert column which enumerates rows.\n\n Arguments:\n name: The name of the column as a string. The default is "index".\n order_by: Column(s) to order by when computing the row index.\n\n Returns:\n The original object with the column added.\n """\n order_by_ = [order_by] if isinstance(order_by, str) else order_by\n return self._with_compliant(\n self._compliant_frame.with_row_index(\n name=name,\n order_by=order_by_, # type: ignore[arg-type]\n )\n )\n\n\nclass Series(NwSeries[IntoSeriesT]):\n @inherit_doc(NwSeries)\n def __init__(\n self, series: Any, *, level: Literal["full", "lazy", "interchange"]\n ) -> None:\n assert series._version is Version.V1 # noqa: S101\n super().__init__(series, level=level)\n\n # We need to override any method which don't return Self so that type\n # annotations are correct.\n\n @property\n def _dataframe(self) -> type[DataFrame[Any]]:\n return DataFrame\n\n def to_frame(self) -> DataFrame[Any]:\n return _stableify(super().to_frame())\n\n def value_counts(\n self,\n *,\n sort: bool = False,\n parallel: bool = False,\n name: str | None = None,\n normalize: bool = False,\n ) -> DataFrame[Any]:\n return _stableify(\n super().value_counts(\n sort=sort, parallel=parallel, name=name, normalize=normalize\n )\n )\n\n def hist(\n self,\n bins: list[float | int] | None = None,\n *,\n bin_count: int | None = None,\n include_breakpoint: bool = True,\n ) -> DataFrame[Any]:\n from narwhals._utils import find_stacklevel\n from narwhals.exceptions import NarwhalsUnstableWarning\n\n msg = (\n "`Series.hist` is being called from the stable API although considered "\n "an unstable feature."\n )\n warn(message=msg, category=NarwhalsUnstableWarning, stacklevel=find_stacklevel())\n return _stableify(\n super().hist(\n bins=bins, bin_count=bin_count, include_breakpoint=include_breakpoint\n )\n )\n\n\nclass Expr(NwExpr):\n def _l1_norm(self) -> Self:\n return super()._taxicab_norm()\n\n def head(self, n: int = 10) -> Self:\n r"""Get the first `n` rows.\n\n Arguments:\n n: Number of rows to return.\n\n Returns:\n A new expression.\n """\n return self._with_orderable_filtration(\n lambda plx: self._to_compliant_expr(plx).head(n)\n )\n\n def tail(self, n: int = 10) -> Self:\n r"""Get the last `n` rows.\n\n Arguments:\n n: Number of rows to return.\n\n Returns:\n A new expression.\n """\n return self._with_orderable_filtration(\n lambda plx: self._to_compliant_expr(plx).tail(n)\n )\n\n def gather_every(self, n: int, offset: int = 0) -> Self:\n r"""Take every nth value in the Series and return as new Series.\n\n Arguments:\n n: Gather every *n*-th row.\n offset: Starting index.\n\n Returns:\n A new expression.\n """\n return self._with_orderable_filtration(\n lambda plx: self._to_compliant_expr(plx).gather_every(n=n, offset=offset)\n )\n\n def unique(self, *, maintain_order: bool | None = None) -> Self:\n """Return unique values of this expression.\n\n Arguments:\n maintain_order: Keep the same order as the original expression.\n This is deprecated and will be removed in a future version,\n but will still be kept around in `narwhals.stable.v1`.\n\n Returns:\n A new expression.\n """\n if maintain_order is not None:\n msg = (\n "`maintain_order` has no effect and 
is only kept around for backwards-compatibility. "\n "You can safely remove this argument."\n )\n warn(message=msg, category=UserWarning, stacklevel=find_stacklevel())\n return self._with_filtration(lambda plx: self._to_compliant_expr(plx).unique())\n\n def sort(self, *, descending: bool = False, nulls_last: bool = False) -> Self:\n """Sort this column. Place null values first.\n\n Arguments:\n descending: Sort in descending order.\n nulls_last: Place null values last instead of first.\n\n Returns:\n A new expression.\n """\n return self._with_unorderable_window(\n lambda plx: self._to_compliant_expr(plx).sort(\n descending=descending, nulls_last=nulls_last\n )\n )\n\n def arg_true(self) -> Self:\n """Find elements where boolean expression is True.\n\n Returns:\n A new expression.\n """\n return self._with_orderable_filtration(\n lambda plx: self._to_compliant_expr(plx).arg_true()\n )\n\n def sample(\n self,\n n: int | None = None,\n *,\n fraction: float | None = None,\n with_replacement: bool = False,\n seed: int | None = None,\n ) -> Self:\n """Sample randomly from this expression.\n\n Arguments:\n n: Number of items to return. Cannot be used with fraction.\n fraction: Fraction of items to return. Cannot be used with n.\n with_replacement: Allow values to be sampled more than once.\n seed: Seed for the random number generator. If set to None (default), a random\n seed is generated for each sample operation.\n\n Returns:\n A new expression.\n """\n return self._with_filtration(\n lambda plx: self._to_compliant_expr(plx).sample(\n n, fraction=fraction, with_replacement=with_replacement, seed=seed\n )\n )\n\n\nclass Schema(NwSchema):\n _version = Version.V1\n\n @inherit_doc(NwSchema)\n def __init__(\n self, schema: Mapping[str, DType] | Iterable[tuple[str, DType]] | None = None\n ) -> None:\n super().__init__(schema)\n\n\n@overload\ndef _stableify(obj: NwDataFrame[IntoFrameT]) -> DataFrame[IntoFrameT]: ...\n@overload\ndef _stableify(obj: NwLazyFrame[IntoFrameT]) -> LazyFrame[IntoFrameT]: ...\n@overload\ndef _stableify(obj: NwSeries[IntoSeriesT]) -> Series[IntoSeriesT]: ...\n@overload\ndef _stableify(obj: NwExpr) -> Expr: ...\n\n\ndef _stableify(\n obj: NwDataFrame[IntoFrameT]\n | NwLazyFrame[IntoFrameT]\n | NwSeries[IntoSeriesT]\n | NwExpr,\n) -> DataFrame[IntoFrameT] | LazyFrame[IntoFrameT] | Series[IntoSeriesT] | Expr:\n if isinstance(obj, NwDataFrame):\n return DataFrame(obj._compliant_frame._with_version(Version.V1), level=obj._level)\n if isinstance(obj, NwLazyFrame):\n return LazyFrame(obj._compliant_frame._with_version(Version.V1), level=obj._level)\n if isinstance(obj, NwSeries):\n return Series(obj._compliant_series._with_version(Version.V1), level=obj._level)\n if isinstance(obj, NwExpr):\n return Expr(obj._to_compliant_expr, obj._metadata)\n assert_never(obj)\n\n\n@overload\ndef from_native(native_object: SeriesT, **kwds: Any) -> SeriesT: ...\n\n\n@overload\ndef from_native(native_object: DataFrameT, **kwds: Any) -> DataFrameT: ...\n\n\n@overload\ndef from_native(native_object: LazyFrameT, **kwds: Any) -> LazyFrameT: ...\n\n\n@overload\ndef from_native(\n native_object: DataFrameT | LazyFrameT, **kwds: Any\n) -> DataFrameT | LazyFrameT: ...\n\n\n@overload\ndef from_native(\n native_object: IntoDataFrameT | IntoSeriesT,\n *,\n strict: Literal[False],\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[True],\n series_only: Literal[False] = ...,\n allow_series: Literal[True],\n) -> DataFrame[IntoDataFrameT] | Series[IntoSeriesT]: ...\n\n\n@overload\ndef 
from_native(\n native_object: IntoDataFrameT | IntoSeriesT,\n *,\n strict: Literal[False],\n eager_only: Literal[True],\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: Literal[True],\n) -> DataFrame[IntoDataFrameT] | Series[IntoSeriesT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoDataFrameT,\n *,\n strict: Literal[False],\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[True],\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> DataFrame[IntoDataFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: T,\n *,\n strict: Literal[False],\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[True],\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> T: ...\n\n\n@overload\ndef from_native(\n native_object: IntoDataFrameT,\n *,\n strict: Literal[False],\n eager_only: Literal[True],\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> DataFrame[IntoDataFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: T,\n *,\n strict: Literal[False],\n eager_only: Literal[True],\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> T: ...\n\n\n@overload\ndef from_native(\n native_object: IntoFrameT | IntoSeriesT,\n *,\n strict: Literal[False],\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: Literal[True],\n) -> DataFrame[IntoFrameT] | LazyFrame[IntoFrameT] | Series[IntoSeriesT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoSeriesT,\n *,\n strict: Literal[False],\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[True],\n allow_series: None = ...,\n) -> Series[IntoSeriesT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoFrameT,\n *,\n strict: Literal[False],\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> DataFrame[IntoFrameT] | LazyFrame[IntoFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: T,\n *,\n strict: Literal[False],\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> T: ...\n\n\n@overload\ndef from_native(\n native_object: IntoDataFrameT,\n *,\n strict: Literal[True] = ...,\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[True],\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> DataFrame[IntoDataFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoDataFrameT,\n *,\n strict: Literal[True] = ...,\n eager_only: Literal[True],\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> DataFrame[IntoDataFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoFrame | IntoSeries,\n *,\n strict: Literal[True] = ...,\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: Literal[True],\n) -> DataFrame[Any] | LazyFrame[Any] | Series[Any]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoSeriesT,\n *,\n strict: Literal[True] = ...,\n eager_only: Literal[False] = ...,\n 
eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[True],\n allow_series: None = ...,\n) -> Series[IntoSeriesT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoLazyFrameT,\n *,\n strict: Literal[True] = ...,\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> LazyFrame[IntoLazyFrameT]: ...\n\n\n# NOTE: `pl.LazyFrame` originally matched here\n@overload\ndef from_native(\n native_object: IntoFrameT,\n *,\n strict: Literal[True] = ...,\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> DataFrame[IntoFrameT] | LazyFrame[IntoFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoDataFrameT | IntoSeries,\n *,\n pass_through: Literal[True],\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[True],\n series_only: Literal[False] = ...,\n allow_series: Literal[True],\n) -> DataFrame[IntoDataFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoDataFrameT | IntoSeriesT,\n *,\n pass_through: Literal[True],\n eager_only: Literal[True],\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: Literal[True],\n) -> DataFrame[IntoDataFrameT] | Series[IntoSeriesT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoDataFrameT,\n *,\n pass_through: Literal[True],\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[True],\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> DataFrame[IntoDataFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: T,\n *,\n pass_through: Literal[True],\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[True],\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> T: ...\n\n\n@overload\ndef from_native(\n native_object: IntoDataFrameT,\n *,\n pass_through: Literal[True],\n eager_only: Literal[True],\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> DataFrame[IntoDataFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: T,\n *,\n pass_through: Literal[True],\n eager_only: Literal[True],\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> T: ...\n\n\n@overload\ndef from_native(\n native_object: IntoFrameT | IntoSeriesT,\n *,\n pass_through: Literal[True],\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: Literal[True],\n) -> DataFrame[IntoFrameT] | LazyFrame[IntoFrameT] | Series[IntoSeriesT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoSeriesT,\n *,\n pass_through: Literal[True],\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[True],\n allow_series: None = ...,\n) -> Series[IntoSeriesT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoFrameT,\n *,\n pass_through: Literal[True],\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> DataFrame[IntoFrameT] | LazyFrame[IntoFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: T,\n *,\n pass_through: Literal[True],\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: 
Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> T: ...\n\n\n@overload\ndef from_native(\n native_object: IntoDataFrameT,\n *,\n pass_through: Literal[False] = ...,\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[True],\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> DataFrame[IntoDataFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoDataFrameT,\n *,\n pass_through: Literal[False] = ...,\n eager_only: Literal[True],\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> DataFrame[IntoDataFrameT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoFrame | IntoSeries,\n *,\n pass_through: Literal[False] = ...,\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: Literal[True],\n) -> DataFrame[Any] | LazyFrame[Any] | Series[Any]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoSeriesT,\n *,\n pass_through: Literal[False] = ...,\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[True],\n allow_series: None = ...,\n) -> Series[IntoSeriesT]: ...\n\n\n@overload\ndef from_native(\n native_object: IntoFrameT,\n *,\n pass_through: Literal[False] = ...,\n eager_only: Literal[False] = ...,\n eager_or_interchange_only: Literal[False] = ...,\n series_only: Literal[False] = ...,\n allow_series: None = ...,\n) -> DataFrame[IntoFrameT] | LazyFrame[IntoFrameT]: ...\n\n\n# All params passed in as variables\n@overload\ndef from_native(\n native_object: Any,\n *,\n pass_through: bool,\n eager_only: bool,\n eager_or_interchange_only: bool = False,\n series_only: bool,\n allow_series: bool | None,\n) -> Any: ...\n\n\ndef from_native( # noqa: D417\n native_object: IntoFrameT | IntoFrame | IntoSeriesT | IntoSeries | T,\n *,\n strict: bool | None = None,\n pass_through: bool | None = None,\n eager_only: bool = False,\n eager_or_interchange_only: bool = False,\n series_only: bool = False,\n allow_series: bool | None = None,\n **kwds: Any,\n) -> LazyFrame[IntoFrameT] | DataFrame[IntoFrameT] | Series[IntoSeriesT] | T:\n """Convert `native_object` to Narwhals Dataframe, Lazyframe, or Series.\n\n Arguments:\n native_object: Raw object from user.\n Depending on the other arguments, input object can be\n\n - a Dataframe / Lazyframe / Series supported by Narwhals (pandas, Polars, PyArrow, ...)\n - an object which implements `__narwhals_dataframe__`, `__narwhals_lazyframe__`,\n or `__narwhals_series__`\n strict: Determine what happens if the object can't be converted to Narwhals\n\n - `True` or `None` (default): raise an error\n - `False`: pass object through as-is\n\n *Deprecated* (v1.13.0)\n\n Please use `pass_through` instead. 
Note that `strict` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n pass_through: Determine what happens if the object can't be converted to Narwhals\n\n - `False` or `None` (default): raise an error\n - `True`: pass object through as-is\n eager_only: Whether to only allow eager objects\n\n - `False` (default): don't require `native_object` to be eager\n - `True`: only convert to Narwhals if `native_object` is eager\n eager_or_interchange_only: Whether to only allow eager objects or objects which\n have interchange-level support in Narwhals\n\n - `False` (default): don't require `native_object` to either be eager or to\n have interchange-level support in Narwhals\n - `True`: only convert to Narwhals if `native_object` is eager or has\n interchange-level support in Narwhals\n\n See [interchange-only support](../extending.md/#interchange-only-support)\n for more details.\n series_only: Whether to only allow Series\n\n - `False` (default): don't require `native_object` to be a Series\n - `True`: only convert to Narwhals if `native_object` is a Series\n allow_series: Whether to allow Series (default is only Dataframe / Lazyframe)\n\n - `False` or `None` (default): don't convert to Narwhals if `native_object` is a Series\n - `True`: allow `native_object` to be a Series\n\n Returns:\n DataFrame, LazyFrame, Series, or original object, depending\n on which combination of parameters was passed.\n """\n # Early returns\n if isinstance(native_object, (DataFrame, LazyFrame)) and not series_only:\n return native_object\n if isinstance(native_object, Series) and (series_only or allow_series):\n return native_object\n\n pass_through = validate_strict_and_pass_though(\n strict, pass_through, pass_through_default=False, emit_deprecation_warning=False\n )\n if kwds:\n msg = f"from_native() got an unexpected keyword argument {next(iter(kwds))!r}"\n raise TypeError(msg)\n\n return _from_native_impl( # type: ignore[no-any-return]\n native_object,\n pass_through=pass_through,\n eager_only=eager_only,\n eager_or_interchange_only=eager_or_interchange_only,\n series_only=series_only,\n allow_series=allow_series,\n version=Version.V1,\n )\n\n\n@overload\ndef to_native(\n narwhals_object: DataFrame[IntoDataFrameT], *, strict: Literal[True] = ...\n) -> IntoDataFrameT: ...\n@overload\ndef to_native(\n narwhals_object: LazyFrame[IntoFrameT], *, strict: Literal[True] = ...\n) -> IntoFrameT: ...\n@overload\ndef to_native(\n narwhals_object: Series[IntoSeriesT], *, strict: Literal[True] = ...\n) -> IntoSeriesT: ...\n@overload\ndef to_native(narwhals_object: Any, *, strict: bool) -> Any: ...\n@overload\ndef to_native(\n narwhals_object: DataFrame[IntoDataFrameT], *, pass_through: Literal[False] = ...\n) -> IntoDataFrameT: ...\n@overload\ndef to_native(\n narwhals_object: LazyFrame[IntoFrameT], *, pass_through: Literal[False] = ...\n) -> IntoFrameT: ...\n@overload\ndef to_native(\n narwhals_object: Series[IntoSeriesT], *, pass_through: Literal[False] = ...\n) -> IntoSeriesT: ...\n@overload\ndef to_native(narwhals_object: Any, *, pass_through: bool) -> Any: ...\n\n\ndef to_native(\n narwhals_object: DataFrame[IntoDataFrameT]\n | LazyFrame[IntoFrameT]\n | Series[IntoSeriesT],\n *,\n strict: bool | None = None,\n pass_through: bool | None = None,\n) -> IntoFrameT | IntoSeriesT | Any:\n """Convert Narwhals object to native one.\n\n Arguments:\n narwhals_object: Narwhals object.\n strict: Determine what happens if 
`narwhals_object` isn't a Narwhals class\n\n - `True` (default): raise an error\n - `False`: pass object through as-is\n\n *Deprecated* (v1.13.0)\n\n Please use `pass_through` instead. Note that `strict` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n pass_through: Determine what happens if `narwhals_object` isn't a Narwhals class\n\n - `False` (default): raise an error\n - `True`: pass object through as-is\n\n Returns:\n Object of the class that the user started with.\n """\n from narwhals._utils import validate_strict_and_pass_though\n from narwhals.dataframe import BaseFrame\n from narwhals.series import Series\n\n pass_through = validate_strict_and_pass_though(\n strict, pass_through, pass_through_default=False, emit_deprecation_warning=False\n )\n\n if isinstance(narwhals_object, BaseFrame):\n return narwhals_object._compliant_frame._native_frame\n if isinstance(narwhals_object, Series):\n return narwhals_object._compliant_series.native\n\n if not pass_through:\n msg = f"Expected Narwhals object, got {type(narwhals_object)}."\n raise TypeError(msg)\n return narwhals_object\n\n\ndef narwhalify(\n func: Callable[..., Any] | None = None,\n *,\n strict: bool | None = None,\n pass_through: bool | None = None,\n eager_only: bool = False,\n eager_or_interchange_only: bool = False,\n series_only: bool = False,\n allow_series: bool | None = True,\n) -> Callable[..., Any]:\n """Decorate function so it becomes dataframe-agnostic.\n\n This will try to convert any dataframe/series-like object into the respective\n Narwhals DataFrame/Series, while leaving the other parameters as they are.\n Similarly, if the output of the function is a Narwhals DataFrame or Series, it will be\n converted back to the original dataframe/series type, while any other output is\n left as is.\n By setting `pass_through=False`, every input and every output will be required to be a\n dataframe/series-like object.\n\n Arguments:\n func: Function to wrap in a `from_native`-`to_native` block.\n strict: Determine what happens if the object can't be converted to Narwhals\n\n *Deprecated* (v1.13.0)\n\n Please use `pass_through` instead. 
Note that `strict` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n\n - `True` or `None` (default): raise an error\n - `False`: pass object through as-is\n pass_through: Determine what happens if the object can't be converted to Narwhals\n\n - `False` or `None` (default): raise an error\n - `True`: pass object through as-is\n eager_only: Whether to only allow eager objects\n\n - `False` (default): don't require `native_object` to be eager\n - `True`: only convert to Narwhals if `native_object` is eager\n eager_or_interchange_only: Whether to only allow eager objects or objects which\n have interchange-level support in Narwhals\n\n - `False` (default): don't require `native_object` to either be eager or to\n have interchange-level support in Narwhals\n - `True`: only convert to Narwhals if `native_object` is eager or has\n interchange-level support in Narwhals\n\n See [interchange-only support](../extending.md/#interchange-only-support)\n for more details.\n series_only: Whether to only allow Series\n\n - `False` (default): don't require `native_object` to be a Series\n - `True`: only convert to Narwhals if `native_object` is a Series\n allow_series: Whether to allow Series (default is only Dataframe / Lazyframe)\n\n - `False` or `None`: don't convert to Narwhals if `native_object` is a Series\n - `True` (default): allow `native_object` to be a Series\n\n Returns:\n Decorated function.\n """\n pass_through = validate_strict_and_pass_though(\n strict, pass_through, pass_through_default=True, emit_deprecation_warning=False\n )\n\n def decorator(func: Callable[..., Any]) -> Callable[..., Any]:\n @wraps(func)\n def wrapper(*args: Any, **kwargs: Any) -> Any:\n args = [\n from_native(\n arg,\n pass_through=pass_through,\n eager_only=eager_only,\n eager_or_interchange_only=eager_or_interchange_only,\n series_only=series_only,\n allow_series=allow_series,\n )\n for arg in args\n ] # type: ignore[assignment]\n\n kwargs = {\n name: from_native(\n value,\n pass_through=pass_through,\n eager_only=eager_only,\n eager_or_interchange_only=eager_or_interchange_only,\n series_only=series_only,\n allow_series=allow_series,\n )\n for name, value in kwargs.items()\n }\n\n backends = {\n b()\n for v in (*args, *kwargs.values())\n if (b := getattr(v, "__native_namespace__", None))\n }\n\n if backends.__len__() > 1:\n msg = "Found multiple backends. 
Make sure that all dataframe/series inputs come from the same backend."\n raise ValueError(msg)\n\n result = func(*args, **kwargs)\n\n return to_native(result, pass_through=pass_through)\n\n return wrapper\n\n if func is None:\n return decorator\n else:\n # If func is not None, it means the decorator is used without arguments\n return decorator(func)\n\n\ndef all() -> Expr:\n """Instantiate an expression representing all columns.\n\n Returns:\n A new expression.\n """\n return _stableify(nw.all())\n\n\ndef col(*names: str | Iterable[str]) -> Expr:\n """Creates an expression that references one or more columns by their name(s).\n\n Arguments:\n names: Name(s) of the columns to use.\n\n Returns:\n A new expression.\n """\n return _stableify(nw.col(*names))\n\n\ndef exclude(*names: str | Iterable[str]) -> Expr:\n """Creates an expression that excludes columns by their name(s).\n\n Arguments:\n names: Name(s) of the columns to exclude.\n\n Returns:\n A new expression.\n """\n return _stableify(nw.exclude(*names))\n\n\ndef nth(*indices: int | Sequence[int]) -> Expr:\n """Creates an expression that references one or more columns by their index(es).\n\n Notes:\n `nth` is not supported for Polars version<1.0.0. Please use\n [`narwhals.col`][] instead.\n\n Arguments:\n indices: One or more indices representing the columns to retrieve.\n\n Returns:\n A new expression.\n """\n return _stableify(nw.nth(*indices))\n\n\ndef len() -> Expr:\n """Return the number of rows.\n\n Returns:\n A new expression.\n """\n return _stableify(nw.len())\n\n\ndef lit(value: NonNestedLiteral, dtype: IntoDType | None = None) -> Expr:\n """Return an expression representing a literal value.\n\n Arguments:\n value: The value to use as literal.\n dtype: The data type of the literal value. 
If not provided, the data type will\n be inferred by the native library.\n\n Returns:\n A new expression.\n """\n return _stableify(nw.lit(value, dtype))\n\n\ndef min(*columns: str) -> Expr:\n """Return the minimum value.\n\n Note:\n Syntactic sugar for ``nw.col(columns).min()``.\n\n Arguments:\n columns: Name(s) of the columns to use in the aggregation function.\n\n Returns:\n A new expression.\n """\n return _stableify(nw.min(*columns))\n\n\ndef max(*columns: str) -> Expr:\n """Return the maximum value.\n\n Note:\n Syntactic sugar for ``nw.col(columns).max()``.\n\n Arguments:\n columns: Name(s) of the columns to use in the aggregation function.\n\n Returns:\n A new expression.\n """\n return _stableify(nw.max(*columns))\n\n\ndef mean(*columns: str) -> Expr:\n """Get the mean value.\n\n Note:\n Syntactic sugar for ``nw.col(columns).mean()``\n\n Arguments:\n columns: Name(s) of the columns to use in the aggregation function\n\n Returns:\n A new expression.\n """\n return _stableify(nw.mean(*columns))\n\n\ndef median(*columns: str) -> Expr:\n """Get the median value.\n\n Notes:\n - Syntactic sugar for ``nw.col(columns).median()``\n - Results might slightly differ across backends due to differences in the\n underlying algorithms used to compute the median.\n\n Arguments:\n columns: Name(s) of the columns to use in the aggregation function\n\n Returns:\n A new expression.\n """\n return _stableify(nw.median(*columns))\n\n\ndef sum(*columns: str) -> Expr:\n """Sum all values.\n\n Note:\n Syntactic sugar for ``nw.col(columns).sum()``\n\n Arguments:\n columns: Name(s) of the columns to use in the aggregation function\n\n Returns:\n A new expression.\n """\n return _stableify(nw.sum(*columns))\n\n\ndef sum_horizontal(*exprs: IntoExpr | Iterable[IntoExpr]) -> Expr:\n """Sum all values horizontally across columns.\n\n Warning:\n Unlike Polars, we support horizontal sum over numeric columns only.\n\n Arguments:\n exprs: Name(s) of the columns to use in the aggregation function. Accepts\n expression input.\n\n Returns:\n A new expression.\n """\n return _stableify(nw.sum_horizontal(*exprs))\n\n\ndef all_horizontal(\n *exprs: IntoExpr | Iterable[IntoExpr], ignore_nulls: bool = False\n) -> Expr:\n r"""Compute the bitwise AND horizontally across columns.\n\n Arguments:\n exprs: Name(s) of the columns to use in the aggregation function. Accepts\n expression input.\n ignore_nulls: Whether to ignore nulls:\n\n - If `True`, null values are ignored. If there are no elements, the result\n is `True`.\n - If `False` (default), Kleene logic is followed. Note that this is not allowed for\n pandas with classical NumPy dtypes when null values are present.\n\n Returns:\n A new expression.\n """\n return _stableify(nw.all_horizontal(*exprs, ignore_nulls=ignore_nulls))\n\n\ndef any_horizontal(\n *exprs: IntoExpr | Iterable[IntoExpr], ignore_nulls: bool = False\n) -> Expr:\n r"""Compute the bitwise OR horizontally across columns.\n\n Arguments:\n exprs: Name(s) of the columns to use in the aggregation function. Accepts\n expression input.\n ignore_nulls: Whether to ignore nulls:\n\n - If `True`, null values are ignored. If there are no elements, the result\n is `False`.\n - If `False` (default), Kleene logic is followed. 
Note that this is not allowed for\n pandas with classical NumPy dtypes when null values are present.\n\n Returns:\n A new expression.\n """\n return _stableify(nw.any_horizontal(*exprs, ignore_nulls=ignore_nulls))\n\n\ndef mean_horizontal(*exprs: IntoExpr | Iterable[IntoExpr]) -> Expr:\n """Compute the mean of all values horizontally across columns.\n\n Arguments:\n exprs: Name(s) of the columns to use in the aggregation function. Accepts\n expression input.\n\n Returns:\n A new expression.\n """\n return _stableify(nw.mean_horizontal(*exprs))\n\n\ndef min_horizontal(*exprs: IntoExpr | Iterable[IntoExpr]) -> Expr:\n """Get the minimum value horizontally across columns.\n\n Notes:\n We support `min_horizontal` over numeric columns only.\n\n Arguments:\n exprs: Name(s) of the columns to use in the aggregation function. Accepts\n expression input.\n\n Returns:\n A new expression.\n """\n return _stableify(nw.min_horizontal(*exprs))\n\n\ndef max_horizontal(*exprs: IntoExpr | Iterable[IntoExpr]) -> Expr:\n """Get the maximum value horizontally across columns.\n\n Notes:\n We support `max_horizontal` over numeric columns only.\n\n Arguments:\n exprs: Name(s) of the columns to use in the aggregation function. Accepts\n expression input.\n\n Returns:\n A new expression.\n """\n return _stableify(nw.max_horizontal(*exprs))\n\n\ndef concat_str(\n exprs: IntoExpr | Iterable[IntoExpr],\n *more_exprs: IntoExpr,\n separator: str = "",\n ignore_nulls: bool = False,\n) -> Expr:\n r"""Horizontally concatenate columns into a single string column.\n\n Arguments:\n exprs: Columns to concatenate into a single string column. Accepts expression\n input. Strings are parsed as column names, other non-expression inputs are\n parsed as literals. Non-`String` columns are cast to `String`.\n *more_exprs: Additional columns to concatenate into a single string column,\n specified as positional arguments.\n separator: String that will be used to separate the values of each column.\n ignore_nulls: Ignore null values (default is `False`).\n If set to `False`, null values will be propagated and if the row contains any\n null values, the output is null.\n\n Returns:\n A new expression.\n """\n return _stableify(\n nw.concat_str(exprs, *more_exprs, separator=separator, ignore_nulls=ignore_nulls)\n )\n\n\ndef get_level(\n obj: DataFrame[Any] | LazyFrame[Any] | Series[IntoSeriesT],\n) -> Literal["full", "lazy", "interchange"]:\n """Level of support Narwhals has for current object.\n\n Arguments:\n obj: Dataframe or Series.\n\n Returns:\n This can be one of\n\n - 'full': full Narwhals API support\n - 'lazy': only lazy operations are supported. This excludes anything\n which involves iterating over rows in Python.\n - 'interchange': only metadata operations are supported (`df.schema`)\n """\n return obj._level\n\n\nclass When(nw_f.When):\n @classmethod\n def from_when(cls, when: nw_f.When) -> When:\n return cls(when._predicate)\n\n def then(self, value: IntoExpr | NonNestedLiteral | _1DArray) -> Then:\n return Then.from_then(super().then(value))\n\n\nclass Then(nw_f.Then, Expr):\n @classmethod\n def from_then(cls, then: nw_f.Then) -> Then:\n return cls(then._to_compliant_expr, then._metadata)\n\n def otherwise(self, value: IntoExpr | NonNestedLiteral | _1DArray) -> Expr:\n return _stableify(super().otherwise(value))\n\n\ndef when(*predicates: IntoExpr | Iterable[IntoExpr]) -> When:\n """Start a `when-then-otherwise` expression.\n\n Expression similar to an `if-else` statement in Python. 
Always initiated by a\n `nw.when(<condition>).then(<value if condition>)`; optionally, a\n `.otherwise(<value if condition is false>)` can be appended at the end. If not\n appended, and the condition is not `True`, `None` will be returned.\n\n Info:\n Chaining multiple `.when(<condition>).then(<value>)` statements is currently\n not supported.\n See [Narwhals#668](https://github.com/narwhals-dev/narwhals/issues/668).\n\n Arguments:\n predicates: Condition(s) that must be met in order to apply the subsequent\n statement. Accepts one or more boolean expressions, which are implicitly\n combined with `&`. String input is parsed as a column name.\n\n Returns:\n A "when" object, which `.then` can be called on.\n """\n return When.from_when(nw_f.when(*predicates))\n\n\n@deprecate_native_namespace(required=True)\ndef new_series(\n name: str,\n values: Any,\n dtype: IntoDType | None = None,\n *,\n backend: ModuleType | Implementation | str | None = None,\n native_namespace: ModuleType | None = None, # noqa: ARG001\n) -> Series[Any]:\n """Instantiate Narwhals Series from iterable (e.g. list or array).\n\n Arguments:\n name: Name of resulting Series.\n values: Values to make the Series from.\n dtype: (Narwhals) dtype. If not provided, the native library\n may auto-infer it from `values`.\n backend: specifies which eager backend to instantiate.\n\n `backend` can be specified in various ways\n\n - As `Implementation.<BACKEND>` with `BACKEND` being `PANDAS`, `PYARROW`,\n `POLARS`, `MODIN` or `CUDF`.\n - As a string: `"pandas"`, `"pyarrow"`, `"polars"`, `"modin"` or `"cudf"`.\n - Directly as a module `pandas`, `pyarrow`, `polars`, `modin` or `cudf`.\n native_namespace: The native library to use for DataFrame creation.\n\n *Deprecated* (v1.31.0)\n\n Please use `backend` instead. Note that `native_namespace` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n\n Returns:\n A new Series.\n """\n backend = cast("ModuleType | Implementation | str", backend)\n return _stableify(_new_series_impl(name, values, dtype, backend=backend))\n\n\n@deprecate_native_namespace(required=True)\ndef from_arrow(\n native_frame: IntoArrowTable,\n *,\n backend: ModuleType | Implementation | str | None = None,\n native_namespace: ModuleType | None = None, # noqa: ARG001\n) -> DataFrame[Any]:\n """Construct a DataFrame from an object which supports the PyCapsule Interface.\n\n Arguments:\n native_frame: Object which implements `__arrow_c_stream__`.\n backend: specifies which eager backend to instantiate.\n\n `backend` can be specified in various ways\n\n - As `Implementation.<BACKEND>` with `BACKEND` being `PANDAS`, `PYARROW`,\n `POLARS`, `MODIN` or `CUDF`.\n - As a string: `"pandas"`, `"pyarrow"`, `"polars"`, `"modin"` or `"cudf"`.\n - Directly as a module `pandas`, `pyarrow`, `polars`, `modin` or `cudf`.\n native_namespace: The native library to use for DataFrame creation.\n\n *Deprecated* (v1.31.0)\n\n Please use `backend` instead. 
Note that `native_namespace` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n\n Returns:\n A new DataFrame.\n """\n backend = cast("ModuleType | Implementation | str", backend)\n return _stableify(nw_f.from_arrow(native_frame, backend=backend))\n\n\n@deprecate_native_namespace()\ndef from_dict(\n data: Mapping[str, Any],\n schema: Mapping[str, DType] | Schema | None = None,\n *,\n backend: ModuleType | Implementation | str | None = None,\n native_namespace: ModuleType | None = None, # noqa: ARG001\n) -> DataFrame[Any]:\n """Instantiate DataFrame from dictionary.\n\n Indexes (if present, for pandas-like backends) are aligned following\n the [left-hand-rule](../concepts/pandas_index.md/).\n\n Notes:\n For pandas-like dataframes, conversion to schema is applied after dataframe\n creation.\n\n Arguments:\n data: Dictionary to create DataFrame from.\n schema: The DataFrame schema as Schema or dict of {name: type}. If not\n specified, the schema will be inferred by the native library.\n backend: specifies which eager backend to instantiate. Only\n necessary if inputs are not Narwhals Series.\n\n `backend` can be specified in various ways\n\n - As `Implementation.<BACKEND>` with `BACKEND` being `PANDAS`, `PYARROW`,\n `POLARS`, `MODIN` or `CUDF`.\n - As a string: `"pandas"`, `"pyarrow"`, `"polars"`, `"modin"` or `"cudf"`.\n - Directly as a module `pandas`, `pyarrow`, `polars`, `modin` or `cudf`.\n native_namespace: The native library to use for DataFrame creation.\n\n *Deprecated* (v1.26.0)\n\n Please use `backend` instead. Note that `native_namespace` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n\n Returns:\n A new DataFrame.\n """\n return _stableify(nw_f.from_dict(data, schema, backend=backend))\n\n\n@deprecate_native_namespace(required=True)\ndef from_numpy(\n data: _2DArray,\n schema: Mapping[str, DType] | Schema | Sequence[str] | None = None,\n *,\n backend: ModuleType | Implementation | str | None = None,\n native_namespace: ModuleType | None = None, # noqa: ARG001\n) -> DataFrame[Any]:\n """Construct a DataFrame from a NumPy ndarray.\n\n Notes:\n Only row orientation is currently supported.\n\n For pandas-like dataframes, conversion to schema is applied after dataframe\n creation.\n\n Arguments:\n data: Two-dimensional data represented as a NumPy ndarray.\n schema: The DataFrame schema as Schema, dict of {name: type}, or a sequence of str.\n backend: specifies which eager backend to instantiate.\n\n `backend` can be specified in various ways\n\n - As `Implementation.<BACKEND>` with `BACKEND` being `PANDAS`, `PYARROW`,\n `POLARS`, `MODIN` or `CUDF`.\n - As a string: `"pandas"`, `"pyarrow"`, `"polars"`, `"modin"` or `"cudf"`.\n - Directly as a module `pandas`, `pyarrow`, `polars`, `modin` or `cudf`.\n native_namespace: The native library to use for DataFrame creation.\n\n *Deprecated* (v1.31.0)\n\n Please use `backend` instead. 
Note that `native_namespace` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n\n Returns:\n A new DataFrame.\n """\n backend = cast("ModuleType | Implementation | str", backend)\n return _stableify(nw_f.from_numpy(data, schema, backend=backend))\n\n\n@deprecate_native_namespace(required=True)\ndef read_csv(\n source: str,\n *,\n backend: ModuleType | Implementation | str | None = None,\n native_namespace: ModuleType | None = None, # noqa: ARG001\n **kwargs: Any,\n) -> DataFrame[Any]:\n """Read a CSV file into a DataFrame.\n\n Arguments:\n source: Path to a file.\n backend: The eager backend for DataFrame creation.\n `backend` can be specified in various ways\n\n - As `Implementation.<BACKEND>` with `BACKEND` being `PANDAS`, `PYARROW`,\n `POLARS`, `MODIN` or `CUDF`.\n - As a string: `"pandas"`, `"pyarrow"`, `"polars"`, `"modin"` or `"cudf"`.\n - Directly as a module `pandas`, `pyarrow`, `polars`, `modin` or `cudf`.\n native_namespace: The native library to use for DataFrame creation.\n\n *Deprecated* (v1.27.2)\n\n Please use `backend` instead. Note that `native_namespace` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n kwargs: Extra keyword arguments which are passed to the native CSV reader.\n For example, you could use\n `nw.read_csv('file.csv', backend='pandas', engine='pyarrow')`.\n\n Returns:\n DataFrame.\n """\n backend = cast("ModuleType | Implementation | str", backend)\n return _stableify(nw_f.read_csv(source, backend=backend, **kwargs))\n\n\n@deprecate_native_namespace(required=True)\ndef scan_csv(\n source: str,\n *,\n backend: ModuleType | Implementation | str | None = None,\n native_namespace: ModuleType | None = None, # noqa: ARG001\n **kwargs: Any,\n) -> LazyFrame[Any]:\n """Lazily read from a CSV file.\n\n For the libraries that do not support lazy dataframes, the function reads\n a csv file eagerly and then converts the resulting dataframe to a lazyframe.\n\n Arguments:\n source: Path to a file.\n backend: The eager backend for DataFrame creation.\n `backend` can be specified in various ways\n\n - As `Implementation.<BACKEND>` with `BACKEND` being `PANDAS`, `PYARROW`,\n `POLARS`, `MODIN` or `CUDF`.\n - As a string: `"pandas"`, `"pyarrow"`, `"polars"`, `"modin"` or `"cudf"`.\n - Directly as a module `pandas`, `pyarrow`, `polars`, `modin` or `cudf`.\n native_namespace: The native library to use for DataFrame creation.\n\n *Deprecated* (v1.31.0)\n\n Please use `backend` instead. 
Note that `native_namespace` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n kwargs: Extra keyword arguments which are passed to the native CSV reader.\n For example, you could use\n `nw.scan_csv('file.csv', backend=pd, engine='pyarrow')`.\n\n Returns:\n LazyFrame.\n """\n backend = cast("ModuleType | Implementation | str", backend)\n return _stableify(nw_f.scan_csv(source, backend=backend, **kwargs))\n\n\n@deprecate_native_namespace(required=True)\ndef read_parquet(\n source: str,\n *,\n backend: ModuleType | Implementation | str | None = None,\n native_namespace: ModuleType | None = None, # noqa: ARG001\n **kwargs: Any,\n) -> DataFrame[Any]:\n """Read into a DataFrame from a parquet file.\n\n Arguments:\n source: Path to a file.\n backend: The eager backend for DataFrame creation.\n `backend` can be specified in various ways\n\n - As `Implementation.<BACKEND>` with `BACKEND` being `PANDAS`, `PYARROW`,\n `POLARS`, `MODIN` or `CUDF`.\n - As a string: `"pandas"`, `"pyarrow"`, `"polars"`, `"modin"` or `"cudf"`.\n - Directly as a module `pandas`, `pyarrow`, `polars`, `modin` or `cudf`.\n native_namespace: The native library to use for DataFrame creation.\n\n *Deprecated* (v1.31.0)\n\n Please use `backend` instead. Note that `native_namespace` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n kwargs: Extra keyword arguments which are passed to the native parquet reader.\n For example, you could use\n `nw.read_parquet('file.parquet', backend=pd, engine='pyarrow')`.\n\n Returns:\n DataFrame.\n """\n backend = cast("ModuleType | Implementation | str", backend)\n return _stableify(nw_f.read_parquet(source, backend=backend, **kwargs))\n\n\n@deprecate_native_namespace(required=True)\ndef scan_parquet(\n source: str,\n *,\n backend: ModuleType | Implementation | str | None = None,\n native_namespace: ModuleType | None = None, # noqa: ARG001\n **kwargs: Any,\n) -> LazyFrame[Any]:\n """Lazily read from a parquet file.\n\n For the libraries that do not support lazy dataframes, the function reads\n a parquet file eagerly and then converts the resulting dataframe to a lazyframe.\n\n Note:\n Spark like backends require a session object to be passed in `kwargs`.\n\n For instance:\n\n ```py\n import narwhals as nw\n from sqlframe.duckdb import DuckDBSession\n\n nw.scan_parquet(source, backend="sqlframe", session=DuckDBSession())\n ```\n\n Arguments:\n source: Path to a file.\n backend: The eager backend for DataFrame creation.\n `backend` can be specified in various ways\n\n - As `Implementation.<BACKEND>` with `BACKEND` being `PANDAS`, `PYARROW`,\n `POLARS`, `MODIN`, `CUDF`, `PYSPARK` or `SQLFRAME`.\n - As a string: `"pandas"`, `"pyarrow"`, `"polars"`, `"modin"`, `"cudf"`,\n `"pyspark"` or `"sqlframe"`.\n - Directly as a module `pandas`, `pyarrow`, `polars`, `modin`, `cudf`,\n `pyspark.sql` or `sqlframe`.\n native_namespace: The native library to use for DataFrame creation.\n\n *Deprecated* (v1.31.0)\n\n Please use `backend` instead. 
Note that `native_namespace` is still available\n (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,\n see [perfect backwards compatibility policy](../backcompat.md/).\n kwargs: Extra keyword arguments which are passed to the native parquet reader.\n For example, you could use\n `nw.scan_parquet('file.parquet', backend=pd, engine='pyarrow')`.\n\n Returns:\n LazyFrame.\n """\n backend = cast("ModuleType | Implementation | str", backend)\n return _stableify(nw_f.scan_parquet(source, backend=backend, **kwargs))\n\n\n__all__ = [\n "Array",\n "Binary",\n "Boolean",\n "Categorical",\n "DataFrame",\n "Date",\n "Datetime",\n "Decimal",\n "Duration",\n "Enum",\n "Expr",\n "Field",\n "Float32",\n "Float64",\n "Implementation",\n "Int8",\n "Int16",\n "Int32",\n "Int64",\n "Int128",\n "LazyFrame",\n "List",\n "Object",\n "Schema",\n "Series",\n "String",\n "Struct",\n "Time",\n "UInt8",\n "UInt16",\n "UInt32",\n "UInt64",\n "UInt128",\n "Unknown",\n "all",\n "all_horizontal",\n "any_horizontal",\n "col",\n "concat",\n "concat_str",\n "dependencies",\n "dtypes",\n "exceptions",\n "exclude",\n "from_arrow",\n "from_dict",\n "from_native",\n "from_numpy",\n "generate_temporary_column_name",\n "get_level",\n "get_native_namespace",\n "is_ordered_categorical",\n "len",\n "lit",\n "max",\n "max_horizontal",\n "maybe_align_index",\n "maybe_convert_dtypes",\n "maybe_get_index",\n "maybe_reset_index",\n "maybe_set_index",\n "mean",\n "mean_horizontal",\n "median",\n "min",\n "min_horizontal",\n "narwhalify",\n "new_series",\n "nth",\n "read_csv",\n "read_parquet",\n "scan_csv",\n "scan_parquet",\n "selectors",\n "show_versions",\n "sum",\n "sum_horizontal",\n "to_native",\n "to_py_scalar",\n "when",\n]\n
.venv\Lib\site-packages\narwhals\stable\v1\__init__.py
__init__.py
Python
60,635
0.75
0.123438
0.050902
react-lib
502
2024-04-18T14:42:45.248967
GPL-3.0
false
0e8be9a35b413f664ccdd8a34bc29b02
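The stable-v1 readers above all share one calling convention: pass `backend` (a string, an `Implementation` member, or the backend module itself) and receive a `DataFrame` or `LazyFrame` back. A minimal usage sketch, assuming a hypothetical `data.csv` and locally installed pandas/polars backends:

```py
# Hedged sketch of the reader entry points above; "data.csv" is a placeholder.
import narwhals.stable.v1 as nw

df = nw.read_csv("data.csv", backend="pandas")  # eager: nw.DataFrame
lf = nw.scan_csv("data.csv", backend="polars")  # nw.LazyFrame
native = nw.to_native(df)                       # unwrap back to the pandas object
```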
\n\n
.venv\Lib\site-packages\narwhals\stable\v1\__pycache__\dependencies.cpython-313.pyc
dependencies.cpython-313.pyc
Other
7,784
0.95
0
0
react-lib
729
2024-01-30T20:24:48.441437
Apache-2.0
false
944ff5c488a1606ee256715f6005284e
\n\n
.venv\Lib\site-packages\narwhals\stable\v1\__pycache__\dtypes.cpython-313.pyc
dtypes.cpython-313.pyc
Other
1,164
0.8
0
0
node-utils
505
2025-02-14T15:22:41.857148
BSD-3-Clause
false
805d7916b77c43cdb81b36fb44f77507
\n\n
.venv\Lib\site-packages\narwhals\stable\v1\__pycache__\selectors.cpython-313.pyc
selectors.cpython-313.pyc
Other
457
0.7
0
0
awesome-app
332
2024-09-03T09:27:27.603558
Apache-2.0
false
314fdbea20951f6367b43b5226393d6b
\n\n
.venv\Lib\site-packages\narwhals\stable\v1\__pycache__\typing.cpython-313.pyc
typing.cpython-313.pyc
Other
3,925
0.8
0
0
awesome-app
886
2025-01-26T21:17:14.971678
GPL-3.0
false
4734fd89baf3cdc6306e30fe3cf9eba6
\n\n
.venv\Lib\site-packages\narwhals\stable\v1\__pycache__\_dtypes.cpython-313.pyc
_dtypes.cpython-313.pyc
Other
4,332
0.95
0.022727
0
node-utils
517
2024-04-06T18:19:47.467140
MIT
false
1fbea52bb014730bfd1107022a606cb5
\n\n
.venv\Lib\site-packages\narwhals\stable\v1\__pycache__\_namespace.cpython-313.pyc
_namespace.cpython-313.pyc
Other
729
0.8
0
0
node-utils
678
2023-09-08T23:46:30.819963
BSD-3-Clause
false
127abf8b4df08bac90f3c5439c0890ee
\n\n
.venv\Lib\site-packages\narwhals\stable\v1\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
70,287
0.75
0.069906
0.016145
node-utils
478
2023-12-04T20:54:09.139245
MIT
false
17eb307465b37770170479802484c31b
\n\n
.venv\Lib\site-packages\narwhals\stable\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
309
0.7
0
0
react-lib
555
2024-01-16T12:07:52.963696
BSD-3-Clause
false
f02653170b2a6e68b669e4ef44b3f922
from __future__ import annotations\n\nfrom collections.abc import Collection, Iterator, Mapping, Sequence\nfrom functools import partial\nfrom typing import TYPE_CHECKING, Any, Literal, cast, overload\n\nimport pyarrow as pa\nimport pyarrow.compute as pc\n\nfrom narwhals._arrow.series import ArrowSeries\nfrom narwhals._arrow.utils import native_to_narwhals_dtype\nfrom narwhals._compliant import EagerDataFrame\nfrom narwhals._expression_parsing import ExprKind\nfrom narwhals._utils import (\n Implementation,\n Version,\n check_column_names_are_unique,\n convert_str_slice_to_int_slice,\n generate_temporary_column_name,\n not_implemented,\n parse_columns_to_drop,\n parse_version,\n scale_bytes,\n supports_arrow_c_stream,\n validate_backend_version,\n)\nfrom narwhals.dependencies import is_numpy_array_1d\nfrom narwhals.exceptions import ShapeError\n\nif TYPE_CHECKING:\n from io import BytesIO\n from pathlib import Path\n from types import ModuleType\n\n import pandas as pd\n import polars as pl\n from typing_extensions import Self, TypeAlias, TypeIs\n\n from narwhals._arrow.expr import ArrowExpr\n from narwhals._arrow.group_by import ArrowGroupBy\n from narwhals._arrow.namespace import ArrowNamespace\n from narwhals._arrow.typing import ( # type: ignore[attr-defined]\n ChunkedArrayAny,\n Mask,\n Order,\n )\n from narwhals._compliant.typing import CompliantDataFrameAny, CompliantLazyFrameAny\n from narwhals._translate import IntoArrowTable\n from narwhals._utils import Version, _FullContext\n from narwhals.dtypes import DType\n from narwhals.schema import Schema\n from narwhals.typing import (\n JoinStrategy,\n SizedMultiIndexSelector,\n SizedMultiNameSelector,\n SizeUnit,\n UniqueKeepStrategy,\n _1DArray,\n _2DArray,\n _SliceIndex,\n _SliceName,\n )\n\n JoinType: TypeAlias = Literal[\n "left semi",\n "right semi",\n "left anti",\n "right anti",\n "inner",\n "left outer",\n "right outer",\n "full outer",\n ]\n PromoteOptions: TypeAlias = Literal["none", "default", "permissive"]\n\n\nclass ArrowDataFrame(\n EagerDataFrame["ArrowSeries", "ArrowExpr", "pa.Table", "ChunkedArrayAny"]\n):\n def __init__(\n self,\n native_dataframe: pa.Table,\n *,\n backend_version: tuple[int, ...],\n version: Version,\n validate_column_names: bool,\n ) -> None:\n if validate_column_names:\n check_column_names_are_unique(native_dataframe.column_names)\n self._native_frame = native_dataframe\n self._implementation = Implementation.PYARROW\n self._backend_version = backend_version\n self._version = version\n validate_backend_version(self._implementation, self._backend_version)\n\n @classmethod\n def from_arrow(cls, data: IntoArrowTable, /, *, context: _FullContext) -> Self:\n backend_version = context._backend_version\n if cls._is_native(data):\n native = data\n elif backend_version >= (14,) or isinstance(data, Collection):\n native = pa.table(data)\n elif supports_arrow_c_stream(data): # pragma: no cover\n msg = f"'pyarrow>=14.0.0' is required for `from_arrow` for object of type {type(data).__name__!r}."\n raise ModuleNotFoundError(msg)\n else: # pragma: no cover\n msg = f"`from_arrow` is not supported for object of type {type(data).__name__!r}."\n raise TypeError(msg)\n return cls.from_native(native, context=context)\n\n @classmethod\n def from_dict(\n cls,\n data: Mapping[str, Any],\n /,\n *,\n context: _FullContext,\n schema: Mapping[str, DType] | Schema | None,\n ) -> Self:\n from narwhals.schema import Schema\n\n pa_schema = Schema(schema).to_arrow() if schema is not None else schema\n native = 
pa.Table.from_pydict(data, schema=pa_schema)\n return cls.from_native(native, context=context)\n\n @staticmethod\n def _is_native(obj: pa.Table | Any) -> TypeIs[pa.Table]:\n return isinstance(obj, pa.Table)\n\n @classmethod\n def from_native(cls, data: pa.Table, /, *, context: _FullContext) -> Self:\n return cls(\n data,\n backend_version=context._backend_version,\n version=context._version,\n validate_column_names=True,\n )\n\n @classmethod\n def from_numpy(\n cls,\n data: _2DArray,\n /,\n *,\n context: _FullContext,\n schema: Mapping[str, DType] | Schema | Sequence[str] | None,\n ) -> Self:\n from narwhals.schema import Schema\n\n arrays = [pa.array(val) for val in data.T]\n if isinstance(schema, (Mapping, Schema)):\n native = pa.Table.from_arrays(arrays, schema=Schema(schema).to_arrow())\n else:\n native = pa.Table.from_arrays(arrays, cls._numpy_column_names(data, schema))\n return cls.from_native(native, context=context)\n\n def __narwhals_namespace__(self) -> ArrowNamespace:\n from narwhals._arrow.namespace import ArrowNamespace\n\n return ArrowNamespace(\n backend_version=self._backend_version, version=self._version\n )\n\n def __native_namespace__(self) -> ModuleType:\n if self._implementation is Implementation.PYARROW:\n return self._implementation.to_native_namespace()\n\n msg = f"Expected pyarrow, got: {type(self._implementation)}" # pragma: no cover\n raise AssertionError(msg)\n\n def __narwhals_dataframe__(self) -> Self:\n return self\n\n def __narwhals_lazyframe__(self) -> Self:\n return self\n\n def _with_version(self, version: Version) -> Self:\n return self.__class__(\n self.native,\n backend_version=self._backend_version,\n version=version,\n validate_column_names=False,\n )\n\n def _with_native(self, df: pa.Table, *, validate_column_names: bool = True) -> Self:\n return self.__class__(\n df,\n backend_version=self._backend_version,\n version=self._version,\n validate_column_names=validate_column_names,\n )\n\n @property\n def shape(self) -> tuple[int, int]:\n return self.native.shape\n\n def __len__(self) -> int:\n return len(self.native)\n\n def row(self, index: int) -> tuple[Any, ...]:\n return tuple(col[index] for col in self.native.itercolumns())\n\n @overload\n def rows(self, *, named: Literal[True]) -> list[dict[str, Any]]: ...\n\n @overload\n def rows(self, *, named: Literal[False]) -> list[tuple[Any, ...]]: ...\n\n @overload\n def rows(self, *, named: bool) -> list[tuple[Any, ...]] | list[dict[str, Any]]: ...\n\n def rows(self, *, named: bool) -> list[tuple[Any, ...]] | list[dict[str, Any]]:\n if not named:\n return list(self.iter_rows(named=False, buffer_size=512)) # type: ignore[return-value]\n return self.native.to_pylist()\n\n def iter_columns(self) -> Iterator[ArrowSeries]:\n for name, series in zip(self.columns, self.native.itercolumns()):\n yield ArrowSeries.from_native(series, context=self, name=name)\n\n _iter_columns = iter_columns\n\n def iter_rows(\n self, *, named: bool, buffer_size: int\n ) -> Iterator[tuple[Any, ...]] | Iterator[dict[str, Any]]:\n df = self.native\n num_rows = df.num_rows\n\n if not named:\n for i in range(0, num_rows, buffer_size):\n rows = df[i : i + buffer_size].to_pydict().values()\n yield from zip(*rows)\n else:\n for i in range(0, num_rows, buffer_size):\n yield from df[i : i + buffer_size].to_pylist()\n\n def get_column(self, name: str) -> ArrowSeries:\n if not isinstance(name, str):\n msg = f"Expected str, got: {type(name)}"\n raise TypeError(msg)\n return ArrowSeries.from_native(self.native[name], context=self, 
name=name)\n\n def __array__(self, dtype: Any, *, copy: bool | None) -> _2DArray:\n return self.native.__array__(dtype, copy=copy)\n\n def _gather(self, rows: SizedMultiIndexSelector[ChunkedArrayAny]) -> Self:\n if len(rows) == 0:\n return self._with_native(self.native.slice(0, 0))\n if self._backend_version < (18,) and isinstance(rows, tuple):\n rows = list(rows)\n return self._with_native(self.native.take(rows))\n\n def _gather_slice(self, rows: _SliceIndex | range) -> Self:\n start = rows.start or 0\n stop = rows.stop if rows.stop is not None else len(self.native)\n if start < 0:\n start = len(self.native) + start\n if stop < 0:\n stop = len(self.native) + stop\n if rows.step is not None and rows.step != 1:\n msg = "Slicing with step is not supported on PyArrow tables"\n raise NotImplementedError(msg)\n return self._with_native(self.native.slice(start, stop - start))\n\n def _select_slice_name(self, columns: _SliceName) -> Self:\n start, stop, step = convert_str_slice_to_int_slice(columns, self.columns)\n return self._with_native(self.native.select(self.columns[start:stop:step]))\n\n def _select_slice_index(self, columns: _SliceIndex | range) -> Self:\n return self._with_native(\n self.native.select(self.columns[columns.start : columns.stop : columns.step])\n )\n\n def _select_multi_index(\n self, columns: SizedMultiIndexSelector[ChunkedArrayAny]\n ) -> Self:\n selector: Sequence[int]\n if isinstance(columns, pa.ChunkedArray):\n # TODO @dangotbanned: Fix upstream with `pa.ChunkedArray.to_pylist(self) -> list[Any]:`\n selector = cast("Sequence[int]", columns.to_pylist())\n # TODO @dangotbanned: Fix upstream, it is actually much narrower\n # **Doesn't accept `ndarray`**\n elif is_numpy_array_1d(columns):\n selector = columns.tolist()\n else:\n selector = columns\n return self._with_native(self.native.select(selector))\n\n def _select_multi_name(\n self, columns: SizedMultiNameSelector[ChunkedArrayAny]\n ) -> Self:\n selector: Sequence[str] | _1DArray\n if isinstance(columns, pa.ChunkedArray):\n # TODO @dangotbanned: Fix upstream with `pa.ChunkedArray.to_pylist(self) -> list[Any]:`\n selector = cast("Sequence[str]", columns.to_pylist())\n else:\n selector = columns\n # NOTE: Fixed in https://github.com/zen-xu/pyarrow-stubs/pull/221\n return self._with_native(self.native.select(selector)) # pyright: ignore[reportArgumentType]\n\n @property\n def schema(self) -> dict[str, DType]:\n schema = self.native.schema\n return {\n name: native_to_narwhals_dtype(dtype, self._version)\n for name, dtype in zip(schema.names, schema.types)\n }\n\n def collect_schema(self) -> dict[str, DType]:\n return self.schema\n\n def estimated_size(self, unit: SizeUnit) -> int | float:\n sz = self.native.nbytes\n return scale_bytes(sz, unit)\n\n explode = not_implemented()\n\n @property\n def columns(self) -> list[str]:\n return self.native.column_names\n\n def simple_select(self, *column_names: str) -> Self:\n return self._with_native(\n self.native.select(list(column_names)), validate_column_names=False\n )\n\n def select(self, *exprs: ArrowExpr) -> Self:\n new_series = self._evaluate_into_exprs(*exprs)\n if not new_series:\n # return empty dataframe, like Polars does\n return self._with_native(\n self.native.__class__.from_arrays([]), validate_column_names=False\n )\n names = [s.name for s in new_series]\n align = new_series[0]._align_full_broadcast\n reshaped = align(*new_series)\n df = pa.Table.from_arrays([s.native for s in reshaped], names=names)\n return self._with_native(df, validate_column_names=True)\n\n 
def _extract_comparand(self, other: ArrowSeries) -> ChunkedArrayAny:\n length = len(self)\n if not other._broadcast:\n if (len_other := len(other)) != length:\n msg = f"Expected object of length {length}, got: {len_other}."\n raise ShapeError(msg)\n return other.native\n\n value = other.native[0]\n return pa.chunked_array([pa.repeat(value, length)])\n\n def with_columns(self, *exprs: ArrowExpr) -> Self:\n # NOTE: We use a faux-mutable variable and repeatedly "overwrite" (native_frame)\n # All `pyarrow` data is immutable, so this is fine\n native_frame = self.native\n new_columns = self._evaluate_into_exprs(*exprs)\n columns = self.columns\n\n for col_value in new_columns:\n col_name = col_value.name\n column = self._extract_comparand(col_value)\n native_frame = (\n native_frame.set_column(columns.index(col_name), col_name, column=column)\n if col_name in columns\n else native_frame.append_column(col_name, column=column)\n )\n\n return self._with_native(native_frame, validate_column_names=False)\n\n def group_by(\n self, keys: Sequence[str] | Sequence[ArrowExpr], *, drop_null_keys: bool\n ) -> ArrowGroupBy:\n from narwhals._arrow.group_by import ArrowGroupBy\n\n return ArrowGroupBy(self, keys, drop_null_keys=drop_null_keys)\n\n def join(\n self,\n other: Self,\n *,\n how: JoinStrategy,\n left_on: Sequence[str] | None,\n right_on: Sequence[str] | None,\n suffix: str,\n ) -> Self:\n how_to_join_map: dict[str, JoinType] = {\n "anti": "left anti",\n "semi": "left semi",\n "inner": "inner",\n "left": "left outer",\n "full": "full outer",\n }\n\n if how == "cross":\n plx = self.__narwhals_namespace__()\n key_token = generate_temporary_column_name(\n n_bytes=8, columns=[*self.columns, *other.columns]\n )\n\n return self._with_native(\n self.with_columns(\n plx.lit(0, None).alias(key_token).broadcast(ExprKind.LITERAL)\n )\n .native.join(\n other.with_columns(\n plx.lit(0, None).alias(key_token).broadcast(ExprKind.LITERAL)\n ).native,\n keys=key_token,\n right_keys=key_token,\n join_type="inner",\n right_suffix=suffix,\n )\n .drop([key_token])\n )\n\n coalesce_keys = how != "full" # polars full join does not coalesce keys\n return self._with_native(\n self.native.join(\n other.native,\n keys=left_on or [], # type: ignore[arg-type]\n right_keys=right_on, # type: ignore[arg-type]\n join_type=how_to_join_map[how],\n right_suffix=suffix,\n coalesce_keys=coalesce_keys,\n )\n )\n\n join_asof = not_implemented()\n\n def drop(self, columns: Sequence[str], *, strict: bool) -> Self:\n to_drop = parse_columns_to_drop(self, columns, strict=strict)\n return self._with_native(self.native.drop(to_drop), validate_column_names=False)\n\n def drop_nulls(self, subset: Sequence[str] | None) -> Self:\n if subset is None:\n return self._with_native(self.native.drop_null(), validate_column_names=False)\n plx = self.__narwhals_namespace__()\n mask = ~plx.any_horizontal(plx.col(*subset).is_null(), ignore_nulls=True)\n return self.filter(mask)\n\n def sort(self, *by: str, descending: bool | Sequence[bool], nulls_last: bool) -> Self:\n if isinstance(descending, bool):\n order: Order = "descending" if descending else "ascending"\n sorting: list[tuple[str, Order]] = [(key, order) for key in by]\n else:\n sorting = [\n (key, "descending" if is_descending else "ascending")\n for key, is_descending in zip(by, descending)\n ]\n\n null_placement = "at_end" if nulls_last else "at_start"\n\n return self._with_native(\n self.native.sort_by(sorting, null_placement=null_placement),\n validate_column_names=False,\n )\n\n def 
to_pandas(self) -> pd.DataFrame:\n return self.native.to_pandas()\n\n def to_polars(self) -> pl.DataFrame:\n import polars as pl # ignore-banned-import\n\n return pl.from_arrow(self.native) # type: ignore[return-value]\n\n def to_numpy(self, dtype: Any = None, *, copy: bool | None = None) -> _2DArray:\n import numpy as np # ignore-banned-import\n\n arr: Any = np.column_stack([col.to_numpy() for col in self.native.columns])\n return arr\n\n @overload\n def to_dict(self, *, as_series: Literal[True]) -> dict[str, ArrowSeries]: ...\n\n @overload\n def to_dict(self, *, as_series: Literal[False]) -> dict[str, list[Any]]: ...\n\n def to_dict(\n self, *, as_series: bool\n ) -> dict[str, ArrowSeries] | dict[str, list[Any]]:\n it = self.iter_columns()\n if as_series:\n return {ser.name: ser for ser in it}\n return {ser.name: ser.to_list() for ser in it}\n\n def with_row_index(self, name: str, order_by: Sequence[str] | None) -> Self:\n plx = self.__narwhals_namespace__()\n if order_by is None:\n import numpy as np # ignore-banned-import\n\n data = pa.array(np.arange(len(self), dtype=np.int64))\n row_index = plx._expr._from_series(\n plx._series.from_iterable(data, context=self, name=name)\n )\n else:\n rank = plx.col(order_by[0]).rank("ordinal", descending=False)\n row_index = (rank.over(partition_by=[], order_by=order_by) - 1).alias(name)\n return self.select(row_index, plx.all())\n\n def filter(self, predicate: ArrowExpr | list[bool | None]) -> Self:\n if isinstance(predicate, list):\n mask_native: Mask | ChunkedArrayAny = predicate\n else:\n # `[0]` is safe as the predicate's expression only returns a single column\n mask_native = self._evaluate_into_exprs(predicate)[0].native\n return self._with_native(\n self.native.filter(mask_native), validate_column_names=False\n )\n\n def head(self, n: int) -> Self:\n df = self.native\n if n >= 0:\n return self._with_native(df.slice(0, n), validate_column_names=False)\n else:\n num_rows = df.num_rows\n return self._with_native(\n df.slice(0, max(0, num_rows + n)), validate_column_names=False\n )\n\n def tail(self, n: int) -> Self:\n df = self.native\n if n >= 0:\n num_rows = df.num_rows\n return self._with_native(\n df.slice(max(0, num_rows - n)), validate_column_names=False\n )\n else:\n return self._with_native(df.slice(abs(n)), validate_column_names=False)\n\n def lazy(self, *, backend: Implementation | None = None) -> CompliantLazyFrameAny:\n if backend is None:\n return self\n elif backend is Implementation.DUCKDB:\n import duckdb # ignore-banned-import\n\n from narwhals._duckdb.dataframe import DuckDBLazyFrame\n\n df = self.native # noqa: F841\n return DuckDBLazyFrame(\n duckdb.table("df"),\n backend_version=parse_version(duckdb),\n version=self._version,\n )\n elif backend is Implementation.POLARS:\n import polars as pl # ignore-banned-import\n\n from narwhals._polars.dataframe import PolarsLazyFrame\n\n return PolarsLazyFrame(\n cast("pl.DataFrame", pl.from_arrow(self.native)).lazy(),\n backend_version=parse_version(pl),\n version=self._version,\n )\n elif backend is Implementation.DASK:\n import dask # ignore-banned-import\n import dask.dataframe as dd # ignore-banned-import\n\n from narwhals._dask.dataframe import DaskLazyFrame\n\n return DaskLazyFrame(\n dd.from_pandas(self.native.to_pandas()),\n backend_version=parse_version(dask),\n version=self._version,\n )\n raise AssertionError # pragma: no cover\n\n def collect(\n self, backend: Implementation | None, **kwargs: Any\n ) -> CompliantDataFrameAny:\n if backend is Implementation.PYARROW or 
backend is None:\n from narwhals._arrow.dataframe import ArrowDataFrame\n\n return ArrowDataFrame(\n self.native,\n backend_version=self._backend_version,\n version=self._version,\n validate_column_names=False,\n )\n\n if backend is Implementation.PANDAS:\n import pandas as pd # ignore-banned-import\n\n from narwhals._pandas_like.dataframe import PandasLikeDataFrame\n\n return PandasLikeDataFrame(\n self.native.to_pandas(),\n implementation=Implementation.PANDAS,\n backend_version=parse_version(pd),\n version=self._version,\n validate_column_names=False,\n )\n\n if backend is Implementation.POLARS:\n import polars as pl # ignore-banned-import\n\n from narwhals._polars.dataframe import PolarsDataFrame\n\n return PolarsDataFrame(\n cast("pl.DataFrame", pl.from_arrow(self.native)),\n backend_version=parse_version(pl),\n version=self._version,\n )\n\n msg = f"Unsupported `backend` value: {backend}" # pragma: no cover\n raise AssertionError(msg) # pragma: no cover\n\n def clone(self) -> Self:\n return self._with_native(self.native, validate_column_names=False)\n\n def item(self, row: int | None, column: int | str | None) -> Any:\n from narwhals._arrow.series import maybe_extract_py_scalar\n\n if row is None and column is None:\n if self.shape != (1, 1):\n msg = (\n "can only call `.item()` if the dataframe is of shape (1, 1),"\n " or if explicit row/col values are provided;"\n f" frame has shape {self.shape!r}"\n )\n raise ValueError(msg)\n return maybe_extract_py_scalar(self.native[0][0], return_py_scalar=True)\n\n elif row is None or column is None:\n msg = "cannot call `.item()` with only one of `row` or `column`"\n raise ValueError(msg)\n\n _col = self.columns.index(column) if isinstance(column, str) else column\n return maybe_extract_py_scalar(self.native[_col][row], return_py_scalar=True)\n\n def rename(self, mapping: Mapping[str, str]) -> Self:\n names: dict[str, str] | list[str]\n if self._backend_version >= (17,):\n names = cast("dict[str, str]", mapping)\n else: # pragma: no cover\n names = [mapping.get(c, c) for c in self.columns]\n return self._with_native(self.native.rename_columns(names))\n\n def write_parquet(self, file: str | Path | BytesIO) -> None:\n import pyarrow.parquet as pp\n\n pp.write_table(self.native, file)\n\n @overload\n def write_csv(self, file: None) -> str: ...\n\n @overload\n def write_csv(self, file: str | Path | BytesIO) -> None: ...\n\n def write_csv(self, file: str | Path | BytesIO | None) -> str | None:\n import pyarrow.csv as pa_csv\n\n if file is None:\n csv_buffer = pa.BufferOutputStream()\n pa_csv.write_csv(self.native, csv_buffer)\n return csv_buffer.getvalue().to_pybytes().decode()\n pa_csv.write_csv(self.native, file)\n return None\n\n def is_unique(self) -> ArrowSeries:\n import numpy as np # ignore-banned-import\n\n col_token = generate_temporary_column_name(n_bytes=8, columns=self.columns)\n row_index = pa.array(np.arange(len(self)))\n keep_idx = (\n self.native.append_column(col_token, row_index)\n .group_by(self.columns)\n .aggregate([(col_token, "min"), (col_token, "max")])\n )\n native = pa.chunked_array(\n pc.and_(\n pc.is_in(row_index, keep_idx[f"{col_token}_min"]),\n pc.is_in(row_index, keep_idx[f"{col_token}_max"]),\n )\n )\n return ArrowSeries.from_native(native, context=self)\n\n def unique(\n self,\n subset: Sequence[str] | None,\n *,\n keep: UniqueKeepStrategy,\n maintain_order: bool | None = None,\n ) -> Self:\n # The param `maintain_order` is only here for compatibility with the Polars API\n # and has no effect on the output.\n 
import numpy as np # ignore-banned-import\n\n if subset and (error := self._check_columns_exist(subset)):\n raise error\n subset = list(subset or self.columns)\n\n if keep in {"any", "first", "last"}:\n from narwhals._arrow.group_by import ArrowGroupBy\n\n agg_func = ArrowGroupBy._REMAP_UNIQUE[keep]\n col_token = generate_temporary_column_name(n_bytes=8, columns=self.columns)\n keep_idx_native = (\n self.native.append_column(col_token, pa.array(np.arange(len(self))))\n .group_by(subset)\n .aggregate([(col_token, agg_func)])\n .column(f"{col_token}_{agg_func}")\n )\n return self._with_native(\n self.native.take(keep_idx_native), validate_column_names=False\n )\n\n keep_idx = self.simple_select(*subset).is_unique()\n plx = self.__narwhals_namespace__()\n return self.filter(plx._expr._from_series(keep_idx))\n\n def gather_every(self, n: int, offset: int) -> Self:\n return self._with_native(self.native[offset::n], validate_column_names=False)\n\n def to_arrow(self) -> pa.Table:\n return self.native\n\n def sample(\n self,\n n: int | None,\n *,\n fraction: float | None,\n with_replacement: bool,\n seed: int | None,\n ) -> Self:\n import numpy as np # ignore-banned-import\n\n num_rows = len(self)\n if n is None and fraction is not None:\n n = int(num_rows * fraction)\n rng = np.random.default_rng(seed=seed)\n idx = np.arange(num_rows)\n mask = rng.choice(idx, size=n, replace=with_replacement)\n return self._with_native(self.native.take(mask), validate_column_names=False)\n\n def unpivot(\n self,\n on: Sequence[str] | None,\n index: Sequence[str] | None,\n variable_name: str,\n value_name: str,\n ) -> Self:\n n_rows = len(self)\n index_ = [] if index is None else index\n on_ = [c for c in self.columns if c not in index_] if on is None else on\n concat = (\n partial(pa.concat_tables, promote_options="permissive")\n if self._backend_version >= (14, 0, 0)\n else pa.concat_tables\n )\n names = [*index_, variable_name, value_name]\n return self._with_native(\n concat(\n [\n pa.Table.from_arrays(\n [\n *(self.native.column(idx_col) for idx_col in index_),\n cast(\n "ChunkedArrayAny",\n pa.array([on_col] * n_rows, pa.string()),\n ),\n self.native.column(on_col),\n ],\n names=names,\n )\n for on_col in on_\n ]\n )\n )\n # TODO(Unassigned): Even with promote_options="permissive", pyarrow does not\n # upcast numeric to non-numeric (e.g. string) datatypes\n\n pivot = not_implemented()\n
.venv\Lib\site-packages\narwhals\_arrow\dataframe.py
dataframe.py
Python
28,247
0.95
0.181582
0.030769
react-lib
380
2024-06-24T21:57:17.516418
BSD-3-Clause
false
28ff647ba1dc5b6ee8be4d97923addda
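`ArrowDataFrame` is internal plumbing rather than public API, but the constructor contract above (a `pa.Table` plus `backend_version`, `version`, and `validate_column_names`) can be exercised directly. A sketch under that assumption; `Version.MAIN` and the internal import paths are assumptions that may change between releases:

```py
# Internal-API sketch (not a supported public interface): wrap a pyarrow
# Table in the compliant-level ArrowDataFrame defined above.
import pyarrow as pa

from narwhals._arrow.dataframe import ArrowDataFrame
from narwhals._utils import Version, parse_version

tbl = pa.table({"a": [1, 2, 3], "b": ["x", "y", "z"]})
df = ArrowDataFrame(
    tbl,
    backend_version=parse_version(pa),
    version=Version.MAIN,            # assumed enum member
    validate_column_names=True,      # raises on duplicate column names
)
print(df.shape)    # (3, 2)
print(df.columns)  # ['a', 'b']
```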
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any\n\nimport pyarrow.compute as pc\n\nfrom narwhals._arrow.series import ArrowSeries\nfrom narwhals._compliant import EagerExpr\nfrom narwhals._expression_parsing import evaluate_output_names_and_aliases\nfrom narwhals._utils import (\n Implementation,\n generate_temporary_column_name,\n not_implemented,\n)\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n from typing_extensions import Self\n\n from narwhals._arrow.dataframe import ArrowDataFrame\n from narwhals._arrow.namespace import ArrowNamespace\n from narwhals._compliant.typing import AliasNames, EvalNames, EvalSeries, ScalarKwargs\n from narwhals._expression_parsing import ExprMetadata\n from narwhals._utils import Version, _FullContext\n from narwhals.typing import RankMethod\n\n\nclass ArrowExpr(EagerExpr["ArrowDataFrame", ArrowSeries]):\n _implementation: Implementation = Implementation.PYARROW\n\n def __init__(\n self,\n call: EvalSeries[ArrowDataFrame, ArrowSeries],\n *,\n depth: int,\n function_name: str,\n evaluate_output_names: EvalNames[ArrowDataFrame],\n alias_output_names: AliasNames | None,\n backend_version: tuple[int, ...],\n version: Version,\n scalar_kwargs: ScalarKwargs | None = None,\n implementation: Implementation | None = None,\n ) -> None:\n self._call = call\n self._depth = depth\n self._function_name = function_name\n self._evaluate_output_names = evaluate_output_names\n self._alias_output_names = alias_output_names\n self._backend_version = backend_version\n self._version = version\n self._scalar_kwargs = scalar_kwargs or {}\n self._metadata: ExprMetadata | None = None\n\n @classmethod\n def from_column_names(\n cls: type[Self],\n evaluate_column_names: EvalNames[ArrowDataFrame],\n /,\n *,\n context: _FullContext,\n function_name: str = "",\n ) -> Self:\n def func(df: ArrowDataFrame) -> list[ArrowSeries]:\n try:\n return [\n ArrowSeries(\n df.native[column_name],\n name=column_name,\n backend_version=df._backend_version,\n version=df._version,\n )\n for column_name in evaluate_column_names(df)\n ]\n except KeyError as e:\n if error := df._check_columns_exist(evaluate_column_names(df)):\n raise error from e\n raise\n\n return cls(\n func,\n depth=0,\n function_name=function_name,\n evaluate_output_names=evaluate_column_names,\n alias_output_names=None,\n backend_version=context._backend_version,\n version=context._version,\n )\n\n @classmethod\n def from_column_indices(cls, *column_indices: int, context: _FullContext) -> Self:\n def func(df: ArrowDataFrame) -> list[ArrowSeries]:\n tbl = df.native\n cols = df.columns\n return [\n ArrowSeries.from_native(tbl[i], name=cols[i], context=df)\n for i in column_indices\n ]\n\n return cls(\n func,\n depth=0,\n function_name="nth",\n evaluate_output_names=cls._eval_names_indices(column_indices),\n alias_output_names=None,\n backend_version=context._backend_version,\n version=context._version,\n )\n\n def __narwhals_namespace__(self) -> ArrowNamespace:\n from narwhals._arrow.namespace import ArrowNamespace\n\n return ArrowNamespace(\n backend_version=self._backend_version, version=self._version\n )\n\n def __narwhals_expr__(self) -> None: ...\n\n def _reuse_series_extra_kwargs(\n self, *, returns_scalar: bool = False\n ) -> dict[str, Any]:\n return {"_return_py_scalar": False} if returns_scalar else {}\n\n def cum_sum(self, *, reverse: bool) -> Self:\n return self._reuse_series("cum_sum", reverse=reverse)\n\n def shift(self, n: int) -> Self:\n return 
self._reuse_series("shift", n=n)\n\n def over(self, partition_by: Sequence[str], order_by: Sequence[str]) -> Self:\n if (\n partition_by\n and self._metadata is not None\n and not self._metadata.is_scalar_like\n ):\n msg = "Only aggregation or literal operations are supported in grouped `over` context for PyArrow."\n raise NotImplementedError(msg)\n\n if not partition_by:\n # e.g. `nw.col('a').cum_sum().order_by(key)`\n # which we can always easily support, as it doesn't require grouping.\n assert order_by # noqa: S101\n\n def func(df: ArrowDataFrame) -> Sequence[ArrowSeries]:\n token = generate_temporary_column_name(8, df.columns)\n df = df.with_row_index(token, order_by=None).sort(\n *order_by, descending=False, nulls_last=False\n )\n result = self(df.drop([token], strict=True))\n # TODO(marco): is there a way to do this efficiently without\n # doing 2 sorts? Here we're sorting the dataframe and then\n # again calling `sort_indices`. `ArrowSeries.scatter` would also sort.\n sorting_indices = pc.sort_indices(df.get_column(token).native)\n return [s._with_native(s.native.take(sorting_indices)) for s in result]\n else:\n\n def func(df: ArrowDataFrame) -> Sequence[ArrowSeries]:\n output_names, aliases = evaluate_output_names_and_aliases(self, df, [])\n if overlap := set(output_names).intersection(partition_by):\n # E.g. `df.select(nw.all().sum().over('a'))`. This is well-defined,\n # we just don't support it yet.\n msg = (\n f"Column names {overlap} appear in both expression output names and in `over` keys.\n"\n "This is not yet supported."\n )\n raise NotImplementedError(msg)\n\n tmp = df.group_by(partition_by, drop_null_keys=False).agg(self)\n tmp = df.simple_select(*partition_by).join(\n tmp,\n how="left",\n left_on=partition_by,\n right_on=partition_by,\n suffix="_right",\n )\n return [tmp.get_column(alias) for alias in aliases]\n\n return self.__class__(\n func,\n depth=self._depth + 1,\n function_name=self._function_name + "->over",\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def cum_count(self, *, reverse: bool) -> Self:\n return self._reuse_series("cum_count", reverse=reverse)\n\n def cum_min(self, *, reverse: bool) -> Self:\n return self._reuse_series("cum_min", reverse=reverse)\n\n def cum_max(self, *, reverse: bool) -> Self:\n return self._reuse_series("cum_max", reverse=reverse)\n\n def cum_prod(self, *, reverse: bool) -> Self:\n return self._reuse_series("cum_prod", reverse=reverse)\n\n def rank(self, method: RankMethod, *, descending: bool) -> Self:\n return self._reuse_series("rank", method=method, descending=descending)\n\n def log(self, base: float) -> Self:\n return self._reuse_series("log", base=base)\n\n def exp(self) -> Self:\n return self._reuse_series("exp")\n\n def sqrt(self) -> Self:\n return self._reuse_series("sqrt")\n\n ewm_mean = not_implemented()\n
.venv\Lib\site-packages\narwhals\_arrow\expr.py
expr.py
Python
7,925
0.95
0.159624
0.055866
react-lib
606
2025-01-24T09:26:14.275087
Apache-2.0
false
b186c0e8aad2204f8196b7523bc7337b
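The `over` implementation above is the interesting part of this file: with no `partition_by` it sorts by `order_by`, evaluates, and restores the original row order via `pc.sort_indices`; with `partition_by` it only accepts scalar-like (aggregation or literal) expressions, which it routes through a group-by plus left join. Through the public API, that path looks roughly like this sketch:

```py
# Hedged sketch of the grouped-`over` path described above, via the public API.
import pyarrow as pa

import narwhals as nw

tbl = pa.table({"g": ["a", "a", "b"], "x": [1, 2, 3]})
df = nw.from_native(tbl, eager_only=True)

# An aggregation over a partition is supported (group_by + join under the hood);
# a non-aggregating window such as cum_sum().over("g") raises NotImplementedError.
out = df.with_columns(nw.col("x").sum().over("g").alias("x_sum"))
print(out.to_native().column("x_sum"))  # per-group sums: 3, 3, 3
```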
from __future__ import annotations\n\nimport collections\nfrom typing import TYPE_CHECKING, Any, ClassVar\n\nimport pyarrow as pa\nimport pyarrow.compute as pc\n\nfrom narwhals._arrow.utils import cast_to_comparable_string_types, extract_py_scalar\nfrom narwhals._compliant import EagerGroupBy\nfrom narwhals._expression_parsing import evaluate_output_names_and_aliases\nfrom narwhals._utils import generate_temporary_column_name\n\nif TYPE_CHECKING:\n from collections.abc import Iterator, Mapping, Sequence\n\n from narwhals._arrow.dataframe import ArrowDataFrame\n from narwhals._arrow.expr import ArrowExpr\n from narwhals._arrow.typing import ( # type: ignore[attr-defined]\n AggregateOptions,\n Aggregation,\n Incomplete,\n )\n from narwhals._compliant.group_by import NarwhalsAggregation\n from narwhals.typing import UniqueKeepStrategy\n\n\nclass ArrowGroupBy(EagerGroupBy["ArrowDataFrame", "ArrowExpr", "Aggregation"]):\n _REMAP_AGGS: ClassVar[Mapping[NarwhalsAggregation, Aggregation]] = {\n "sum": "sum",\n "mean": "mean",\n "median": "approximate_median",\n "max": "max",\n "min": "min",\n "std": "stddev",\n "var": "variance",\n "len": "count",\n "n_unique": "count_distinct",\n "count": "count",\n }\n _REMAP_UNIQUE: ClassVar[Mapping[UniqueKeepStrategy, Aggregation]] = {\n "any": "min",\n "first": "min",\n "last": "max",\n }\n\n def __init__(\n self,\n df: ArrowDataFrame,\n keys: Sequence[ArrowExpr] | Sequence[str],\n /,\n *,\n drop_null_keys: bool,\n ) -> None:\n self._df = df\n frame, self._keys, self._output_key_names = self._parse_keys(df, keys=keys)\n self._compliant_frame = frame.drop_nulls(self._keys) if drop_null_keys else frame\n self._grouped = pa.TableGroupBy(self.compliant.native, self._keys)\n self._drop_null_keys = drop_null_keys\n\n def agg(self, *exprs: ArrowExpr) -> ArrowDataFrame:\n self._ensure_all_simple(exprs)\n aggs: list[tuple[str, Aggregation, AggregateOptions | None]] = []\n expected_pyarrow_column_names: list[str] = self._keys.copy()\n new_column_names: list[str] = self._keys.copy()\n exclude = (*self._keys, *self._output_key_names)\n\n for expr in exprs:\n output_names, aliases = evaluate_output_names_and_aliases(\n expr, self.compliant, exclude\n )\n\n if expr._depth == 0:\n # e.g. 
`agg(nw.len())`\n if expr._function_name != "len": # pragma: no cover\n msg = "Safety assertion failed, please report a bug to https://github.com/narwhals-dev/narwhals/issues"\n raise AssertionError(msg)\n\n new_column_names.append(aliases[0])\n expected_pyarrow_column_names.append(f"{self._keys[0]}_count")\n aggs.append((self._keys[0], "count", pc.CountOptions(mode="all")))\n continue\n\n function_name = self._leaf_name(expr)\n if function_name in {"std", "var"}:\n assert "ddof" in expr._scalar_kwargs # noqa: S101\n option: Any = pc.VarianceOptions(ddof=expr._scalar_kwargs["ddof"])\n elif function_name in {"len", "n_unique"}:\n option = pc.CountOptions(mode="all")\n elif function_name == "count":\n option = pc.CountOptions(mode="only_valid")\n else:\n option = None\n\n function_name = self._remap_expr_name(function_name)\n new_column_names.extend(aliases)\n expected_pyarrow_column_names.extend(\n [f"{output_name}_{function_name}" for output_name in output_names]\n )\n aggs.extend(\n [(output_name, function_name, option) for output_name in output_names]\n )\n\n result_simple = self._grouped.aggregate(aggs)\n\n # Rename columns, being very careful\n expected_old_names_indices: dict[str, list[int]] = collections.defaultdict(list)\n for idx, item in enumerate(expected_pyarrow_column_names):\n expected_old_names_indices[item].append(idx)\n if not (\n set(result_simple.column_names) == set(expected_pyarrow_column_names)\n and len(result_simple.column_names) == len(expected_pyarrow_column_names)\n ): # pragma: no cover\n msg = (\n f"Safety assertion failed, expected {expected_pyarrow_column_names} "\n f"got {result_simple.column_names}, "\n "please report a bug at https://github.com/narwhals-dev/narwhals/issues"\n )\n raise AssertionError(msg)\n index_map: list[int] = [\n expected_old_names_indices[item].pop(0) for item in result_simple.column_names\n ]\n new_column_names = [new_column_names[i] for i in index_map]\n result_simple = result_simple.rename_columns(new_column_names)\n if self.compliant._backend_version < (12, 0, 0):\n columns = result_simple.column_names\n result_simple = result_simple.select(\n [*self._keys, *[col for col in columns if col not in self._keys]]\n )\n\n return self.compliant._with_native(result_simple).rename(\n dict(zip(self._keys, self._output_key_names))\n )\n\n def __iter__(self) -> Iterator[tuple[Any, ArrowDataFrame]]:\n col_token = generate_temporary_column_name(\n n_bytes=8, columns=self.compliant.columns\n )\n null_token: str = "__null_token_value__" # noqa: S105\n\n table = self.compliant.native\n it, separator_scalar = cast_to_comparable_string_types(\n *(table[key] for key in self._keys), separator=""\n )\n # NOTE: stubs indicate `separator` must also be a `ChunkedArray`\n # Reality: `str` is fine\n concat_str: Incomplete = pc.binary_join_element_wise\n key_values = concat_str(\n *it, separator_scalar, null_handling="replace", null_replacement=null_token\n )\n table = table.add_column(i=0, field_=col_token, column=key_values)\n\n for v in pc.unique(key_values):\n t = self.compliant._with_native(\n table.filter(pc.equal(table[col_token], v)).drop([col_token])\n )\n row = t.simple_select(*self._keys).row(0)\n yield (\n tuple(extract_py_scalar(el) for el in row),\n t.simple_select(*self._df.columns),\n )\n
.venv\Lib\site-packages\narwhals\_arrow\group_by.py
group_by.py
Python
6,555
0.95
0.136646
0.049645
node-utils
373
2023-07-20T15:08:27.600573
BSD-3-Clause
false
9f1c735bf689f6aba41e5da8cc37f5d0
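`ArrowGroupBy.agg` compiles each narwhals aggregation into a `(column, aggregation, options)` tuple via `_REMAP_AGGS`, hands the list to `pa.TableGroupBy.aggregate`, and then renames pyarrow's `<column>_<aggregation>` output columns back to the requested aliases. The native call it builds looks like this sketch:

```py
# Sketch of the native aggregation underlying ArrowGroupBy.agg; pyarrow names
# outputs "<column>_<aggregation>", which the code above then renames.
import pyarrow as pa

tbl = pa.table({"g": ["a", "a", "b"], "x": [1.0, 2.0, 3.0]})
result = pa.TableGroupBy(tbl, ["g"]).aggregate([("x", "mean"), ("x", "stddev")])
print(result.column_names)  # 'x_mean', 'x_stddev', 'g' (order varies by pyarrow version)
```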
from __future__ import annotations\n\nimport operator\nfrom functools import reduce\nfrom itertools import chain\nfrom typing import TYPE_CHECKING, Literal\n\nimport pyarrow as pa\nimport pyarrow.compute as pc\n\nfrom narwhals._arrow.dataframe import ArrowDataFrame\nfrom narwhals._arrow.expr import ArrowExpr\nfrom narwhals._arrow.selectors import ArrowSelectorNamespace\nfrom narwhals._arrow.series import ArrowSeries\nfrom narwhals._arrow.utils import cast_to_comparable_string_types\nfrom narwhals._compliant import CompliantThen, EagerNamespace, EagerWhen\nfrom narwhals._expression_parsing import (\n combine_alias_output_names,\n combine_evaluate_output_names,\n)\nfrom narwhals._utils import Implementation\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n\n from narwhals._arrow.typing import ArrayOrScalar, ChunkedArrayAny, Incomplete\n from narwhals._utils import Version\n from narwhals.typing import IntoDType, NonNestedLiteral\n\n\nclass ArrowNamespace(\n EagerNamespace[ArrowDataFrame, ArrowSeries, ArrowExpr, pa.Table, "ChunkedArrayAny"]\n):\n @property\n def _dataframe(self) -> type[ArrowDataFrame]:\n return ArrowDataFrame\n\n @property\n def _expr(self) -> type[ArrowExpr]:\n return ArrowExpr\n\n @property\n def _series(self) -> type[ArrowSeries]:\n return ArrowSeries\n\n # --- not in spec ---\n def __init__(self, *, backend_version: tuple[int, ...], version: Version) -> None:\n self._backend_version = backend_version\n self._implementation = Implementation.PYARROW\n self._version = version\n\n def len(self) -> ArrowExpr:\n # coverage bug? this is definitely hit\n return self._expr( # pragma: no cover\n lambda df: [\n ArrowSeries.from_iterable([len(df.native)], name="len", context=self)\n ],\n depth=0,\n function_name="len",\n evaluate_output_names=lambda _df: ["len"],\n alias_output_names=None,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def lit(self, value: NonNestedLiteral, dtype: IntoDType | None) -> ArrowExpr:\n def _lit_arrow_series(_: ArrowDataFrame) -> ArrowSeries:\n arrow_series = ArrowSeries.from_iterable(\n data=[value], name="literal", context=self\n )\n if dtype:\n return arrow_series.cast(dtype)\n return arrow_series\n\n return self._expr(\n lambda df: [_lit_arrow_series(df)],\n depth=0,\n function_name="lit",\n evaluate_output_names=lambda _df: ["literal"],\n alias_output_names=None,\n backend_version=self._backend_version,\n version=self._version,\n )\n\n def all_horizontal(self, *exprs: ArrowExpr, ignore_nulls: bool) -> ArrowExpr:\n def func(df: ArrowDataFrame) -> list[ArrowSeries]:\n series = chain.from_iterable(expr(df) for expr in exprs)\n align = self._series._align_full_broadcast\n it = (\n (s.fill_null(True, None, None) for s in series) # noqa: FBT003\n if ignore_nulls\n else series\n )\n return [reduce(operator.and_, align(*it))]\n\n return self._expr._from_callable(\n func=func,\n depth=max(x._depth for x in exprs) + 1,\n function_name="all_horizontal",\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n alias_output_names=combine_alias_output_names(*exprs),\n context=self,\n )\n\n def any_horizontal(self, *exprs: ArrowExpr, ignore_nulls: bool) -> ArrowExpr:\n def func(df: ArrowDataFrame) -> list[ArrowSeries]:\n series = chain.from_iterable(expr(df) for expr in exprs)\n align = self._series._align_full_broadcast\n it = (\n (s.fill_null(False, None, None) for s in series) # noqa: FBT003\n if ignore_nulls\n else series\n )\n return [reduce(operator.or_, align(*it))]\n\n return self._expr._from_callable(\n 
func=func,\n depth=max(x._depth for x in exprs) + 1,\n function_name="any_horizontal",\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n alias_output_names=combine_alias_output_names(*exprs),\n context=self,\n )\n\n def sum_horizontal(self, *exprs: ArrowExpr) -> ArrowExpr:\n def func(df: ArrowDataFrame) -> list[ArrowSeries]:\n it = chain.from_iterable(expr(df) for expr in exprs)\n series = (s.fill_null(0, strategy=None, limit=None) for s in it)\n align = self._series._align_full_broadcast\n return [reduce(operator.add, align(*series))]\n\n return self._expr._from_callable(\n func=func,\n depth=max(x._depth for x in exprs) + 1,\n function_name="sum_horizontal",\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n alias_output_names=combine_alias_output_names(*exprs),\n context=self,\n )\n\n def mean_horizontal(self, *exprs: ArrowExpr) -> ArrowExpr:\n int_64 = self._version.dtypes.Int64()\n\n def func(df: ArrowDataFrame) -> list[ArrowSeries]:\n expr_results = list(chain.from_iterable(expr(df) for expr in exprs))\n align = self._series._align_full_broadcast\n series = align(\n *(s.fill_null(0, strategy=None, limit=None) for s in expr_results)\n )\n non_na = align(*(1 - s.is_null().cast(int_64) for s in expr_results))\n return [reduce(operator.add, series) / reduce(operator.add, non_na)]\n\n return self._expr._from_callable(\n func=func,\n depth=max(x._depth for x in exprs) + 1,\n function_name="mean_horizontal",\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n alias_output_names=combine_alias_output_names(*exprs),\n context=self,\n )\n\n def min_horizontal(self, *exprs: ArrowExpr) -> ArrowExpr:\n def func(df: ArrowDataFrame) -> list[ArrowSeries]:\n align = self._series._align_full_broadcast\n init_series, *series = list(chain.from_iterable(expr(df) for expr in exprs))\n init_series, *series = align(init_series, *series)\n native_series = reduce(\n pc.min_element_wise, [s.native for s in series], init_series.native\n )\n return [\n ArrowSeries(\n native_series,\n name=init_series.name,\n backend_version=self._backend_version,\n version=self._version,\n )\n ]\n\n return self._expr._from_callable(\n func=func,\n depth=max(x._depth for x in exprs) + 1,\n function_name="min_horizontal",\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n alias_output_names=combine_alias_output_names(*exprs),\n context=self,\n )\n\n def max_horizontal(self, *exprs: ArrowExpr) -> ArrowExpr:\n def func(df: ArrowDataFrame) -> list[ArrowSeries]:\n align = self._series._align_full_broadcast\n init_series, *series = list(chain.from_iterable(expr(df) for expr in exprs))\n init_series, *series = align(init_series, *series)\n native_series = reduce(\n pc.max_element_wise, [s.native for s in series], init_series.native\n )\n return [\n ArrowSeries(\n native_series,\n name=init_series.name,\n backend_version=self._backend_version,\n version=self._version,\n )\n ]\n\n return self._expr._from_callable(\n func=func,\n depth=max(x._depth for x in exprs) + 1,\n function_name="max_horizontal",\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n alias_output_names=combine_alias_output_names(*exprs),\n context=self,\n )\n\n def _concat_diagonal(self, dfs: Sequence[pa.Table], /) -> pa.Table:\n if self._backend_version >= (14,):\n return pa.concat_tables(dfs, promote_options="default")\n return pa.concat_tables(dfs, promote=True) # pragma: no cover\n\n def _concat_horizontal(self, dfs: Sequence[pa.Table], /) -> pa.Table:\n names = 
list(chain.from_iterable(df.column_names for df in dfs))\n arrays = list(chain.from_iterable(df.itercolumns() for df in dfs))\n return pa.Table.from_arrays(arrays, names=names)\n\n def _concat_vertical(self, dfs: Sequence[pa.Table], /) -> pa.Table:\n cols_0 = dfs[0].column_names\n for i, df in enumerate(dfs[1:], start=1):\n cols_current = df.column_names\n if cols_current != cols_0:\n msg = (\n "unable to vstack, column names don't match:\n"\n f" - dataframe 0: {cols_0}\n"\n f" - dataframe {i}: {cols_current}\n"\n )\n raise TypeError(msg)\n return pa.concat_tables(dfs)\n\n @property\n def selectors(self) -> ArrowSelectorNamespace:\n return ArrowSelectorNamespace.from_namespace(self)\n\n def when(self, predicate: ArrowExpr) -> ArrowWhen:\n return ArrowWhen.from_expr(predicate, context=self)\n\n def concat_str(\n self, *exprs: ArrowExpr, separator: str, ignore_nulls: bool\n ) -> ArrowExpr:\n def func(df: ArrowDataFrame) -> list[ArrowSeries]:\n align = self._series._align_full_broadcast\n compliant_series_list = align(\n *(chain.from_iterable(expr(df) for expr in exprs))\n )\n name = compliant_series_list[0].name\n null_handling: Literal["skip", "emit_null"] = (\n "skip" if ignore_nulls else "emit_null"\n )\n it, separator_scalar = cast_to_comparable_string_types(\n *(s.native for s in compliant_series_list), separator=separator\n )\n # NOTE: stubs indicate `separator` must also be a `ChunkedArray`\n # Reality: `str` is fine\n concat_str: Incomplete = pc.binary_join_element_wise\n compliant = self._series(\n concat_str(*it, separator_scalar, null_handling=null_handling),\n name=name,\n backend_version=self._backend_version,\n version=self._version,\n )\n return [compliant]\n\n return self._expr._from_callable(\n func=func,\n depth=max(x._depth for x in exprs) + 1,\n function_name="concat_str",\n evaluate_output_names=combine_evaluate_output_names(*exprs),\n alias_output_names=combine_alias_output_names(*exprs),\n context=self,\n )\n\n\nclass ArrowWhen(EagerWhen[ArrowDataFrame, ArrowSeries, ArrowExpr, "ChunkedArrayAny"]):\n @property\n def _then(self) -> type[ArrowThen]:\n return ArrowThen\n\n def _if_then_else(\n self,\n when: ChunkedArrayAny,\n then: ChunkedArrayAny,\n otherwise: ArrayOrScalar | NonNestedLiteral,\n /,\n ) -> ChunkedArrayAny:\n otherwise = pa.nulls(len(when), then.type) if otherwise is None else otherwise\n return pc.if_else(when, then, otherwise)\n\n\nclass ArrowThen(CompliantThen[ArrowDataFrame, ArrowSeries, ArrowExpr], ArrowExpr): ...\n
.venv\Lib\site-packages\narwhals\_arrow\namespace.py
namespace.py
Python
11,581
0.95
0.216216
0.027132
vue-tools
858
2024-07-04T19:30:10.764586
GPL-3.0
false
c93c54fe19d514ccdf40b0f33fb46a82
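The horizontal reductions above all follow one pattern: evaluate every expression, broadcast the results to a common length with `_align_full_broadcast`, then fold with an element-wise kernel (`operator.and_`, `operator.add`, `pc.min_element_wise`, ...). The fold itself, isolated:

```py
# Sketch of the element-wise fold used by min_horizontal/max_horizontal above.
from functools import reduce

import pyarrow as pa
import pyarrow.compute as pc

cols = [pa.chunked_array([[1, 5, 3]]), pa.chunked_array([[4, 2, 6]])]
row_min = reduce(pc.min_element_wise, cols[1:], cols[0])
print(row_min.to_pylist())  # [1, 2, 3]
```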
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom narwhals._arrow.expr import ArrowExpr\nfrom narwhals._compliant import CompliantSelector, EagerSelectorNamespace\n\nif TYPE_CHECKING:\n from narwhals._arrow.dataframe import ArrowDataFrame # noqa: F401\n from narwhals._arrow.series import ArrowSeries # noqa: F401\n\n\nclass ArrowSelectorNamespace(EagerSelectorNamespace["ArrowDataFrame", "ArrowSeries"]):\n @property\n def _selector(self) -> type[ArrowSelector]:\n return ArrowSelector\n\n\nclass ArrowSelector(CompliantSelector["ArrowDataFrame", "ArrowSeries"], ArrowExpr): # type: ignore[misc]\n def _to_expr(self) -> ArrowExpr:\n return ArrowExpr(\n self._call,\n depth=self._depth,\n function_name=self._function_name,\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n version=self._version,\n )\n
.venv\Lib\site-packages\narwhals\_arrow\selectors.py
selectors.py
Python
1,011
0.95
0.172414
0
node-utils
253
2025-01-14T18:29:42.689257
Apache-2.0
false
dcacdfb5ba3af91127957caf95af1bc6
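`ArrowSelector` mostly exists so the shared selector machinery can degrade back to a plain `ArrowExpr` via `_to_expr`; from user code it is reached through `narwhals.selectors`. A usage sketch:

```py
# Hedged usage sketch: the public selectors resolve through the namespace above.
import pyarrow as pa

import narwhals as nw
import narwhals.selectors as ncs

tbl = pa.table({"a": [1, 2], "b": ["x", "y"]})
df = nw.from_native(tbl, eager_only=True)
print(df.select(ncs.numeric()).columns)  # ['a']
```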
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, cast, overload\n\nimport pyarrow as pa\nimport pyarrow.compute as pc\n\nfrom narwhals._arrow.series_cat import ArrowSeriesCatNamespace\nfrom narwhals._arrow.series_dt import ArrowSeriesDateTimeNamespace\nfrom narwhals._arrow.series_list import ArrowSeriesListNamespace\nfrom narwhals._arrow.series_str import ArrowSeriesStringNamespace\nfrom narwhals._arrow.series_struct import ArrowSeriesStructNamespace\nfrom narwhals._arrow.utils import (\n cast_for_truediv,\n chunked_array,\n extract_native,\n floordiv_compat,\n lit,\n narwhals_to_native_dtype,\n native_to_narwhals_dtype,\n nulls_like,\n pad_series,\n)\nfrom narwhals._compliant import EagerSeries\nfrom narwhals._expression_parsing import ExprKind\nfrom narwhals._typing_compat import assert_never\nfrom narwhals._utils import (\n Implementation,\n generate_temporary_column_name,\n is_list_of,\n not_implemented,\n requires,\n validate_backend_version,\n)\nfrom narwhals.dependencies import is_numpy_array_1d\nfrom narwhals.exceptions import InvalidOperationError, ShapeError\n\nif TYPE_CHECKING:\n from collections.abc import Iterable, Iterator, Mapping, Sequence\n from types import ModuleType\n\n import pandas as pd\n import polars as pl\n from typing_extensions import Self, TypeIs\n\n from narwhals._arrow.dataframe import ArrowDataFrame\n from narwhals._arrow.namespace import ArrowNamespace\n from narwhals._arrow.typing import ( # type: ignore[attr-defined]\n ArrayAny,\n ArrayOrChunkedArray,\n ArrayOrScalar,\n ChunkedArrayAny,\n Incomplete,\n NullPlacement,\n Order,\n TieBreaker,\n _AsPyType,\n _BasicDataType,\n )\n from narwhals._utils import Version, _FullContext\n from narwhals.dtypes import DType\n from narwhals.typing import (\n ClosedInterval,\n FillNullStrategy,\n Into1DArray,\n IntoDType,\n NonNestedLiteral,\n NumericLiteral,\n PythonLiteral,\n RankMethod,\n RollingInterpolationMethod,\n SizedMultiIndexSelector,\n TemporalLiteral,\n _1DArray,\n _2DArray,\n _SliceIndex,\n )\n\n\n# TODO @dangotbanned: move into `_arrow.utils`\n# Lots of modules are importing inline\n@overload\ndef maybe_extract_py_scalar(\n value: pa.Scalar[_BasicDataType[_AsPyType]],\n return_py_scalar: bool, # noqa: FBT001\n) -> _AsPyType: ...\n\n\n@overload\ndef maybe_extract_py_scalar(\n value: pa.Scalar[pa.StructType],\n return_py_scalar: bool, # noqa: FBT001\n) -> list[dict[str, Any]]: ...\n\n\n@overload\ndef maybe_extract_py_scalar(\n value: pa.Scalar[pa.ListType[_BasicDataType[_AsPyType]]],\n return_py_scalar: bool, # noqa: FBT001\n) -> list[_AsPyType]: ...\n\n\n@overload\ndef maybe_extract_py_scalar(\n value: pa.Scalar[Any] | Any,\n return_py_scalar: bool, # noqa: FBT001\n) -> Any: ...\n\n\ndef maybe_extract_py_scalar(value: Any, return_py_scalar: bool) -> Any: # noqa: FBT001\n if TYPE_CHECKING:\n return value.as_py()\n if return_py_scalar:\n return getattr(value, "as_py", lambda: value)()\n return value\n\n\nclass ArrowSeries(EagerSeries["ChunkedArrayAny"]):\n def __init__(\n self,\n native_series: ChunkedArrayAny,\n *,\n name: str,\n backend_version: tuple[int, ...],\n version: Version,\n ) -> None:\n self._name = name\n self._native_series: ChunkedArrayAny = native_series\n self._implementation = Implementation.PYARROW\n self._backend_version = backend_version\n self._version = version\n validate_backend_version(self._implementation, self._backend_version)\n self._broadcast = False\n\n @property\n def native(self) -> ChunkedArrayAny:\n return self._native_series\n\n def 
_with_version(self, version: Version) -> Self:\n return self.__class__(\n self.native,\n name=self._name,\n backend_version=self._backend_version,\n version=version,\n )\n\n def _with_native(\n self, series: ArrayOrScalar, *, preserve_broadcast: bool = False\n ) -> Self:\n result = self.from_native(chunked_array(series), name=self.name, context=self)\n if preserve_broadcast:\n result._broadcast = self._broadcast\n return result\n\n @classmethod\n def from_iterable(\n cls,\n data: Iterable[Any],\n *,\n context: _FullContext,\n name: str = "",\n dtype: IntoDType | None = None,\n ) -> Self:\n version = context._version\n dtype_pa = narwhals_to_native_dtype(dtype, version) if dtype else None\n return cls.from_native(\n chunked_array([data], dtype_pa), name=name, context=context\n )\n\n def _from_scalar(self, value: Any) -> Self:\n if self._backend_version < (13,) and hasattr(value, "as_py"):\n value = value.as_py()\n return super()._from_scalar(value)\n\n @staticmethod\n def _is_native(obj: ChunkedArrayAny | Any) -> TypeIs[ChunkedArrayAny]:\n return isinstance(obj, pa.ChunkedArray)\n\n @classmethod\n def from_native(\n cls, data: ChunkedArrayAny, /, *, context: _FullContext, name: str = ""\n ) -> Self:\n return cls(\n data,\n backend_version=context._backend_version,\n version=context._version,\n name=name,\n )\n\n @classmethod\n def from_numpy(cls, data: Into1DArray, /, *, context: _FullContext) -> Self:\n return cls.from_iterable(\n data if is_numpy_array_1d(data) else [data], context=context\n )\n\n @classmethod\n def _align_full_broadcast(cls, *series: Self) -> Sequence[Self]:\n lengths = [len(s) for s in series]\n max_length = max(lengths)\n fast_path = all(_len == max_length for _len in lengths)\n if fast_path:\n return series\n reshaped = []\n for s in series:\n if s._broadcast:\n compliant = s._with_native(pa.repeat(s.native[0], max_length))\n elif (actual_len := len(s)) != max_length:\n msg = f"Expected object of length {max_length}, got {actual_len}."\n raise ShapeError(msg)\n else:\n compliant = s\n reshaped.append(compliant)\n return reshaped\n\n def __narwhals_namespace__(self) -> ArrowNamespace:\n from narwhals._arrow.namespace import ArrowNamespace\n\n return ArrowNamespace(\n backend_version=self._backend_version, version=self._version\n )\n\n def __eq__(self, other: object) -> Self: # type: ignore[override]\n other = cast("PythonLiteral | ArrowSeries | None", other)\n ser, rhs = extract_native(self, other)\n return self._with_native(pc.equal(ser, rhs))\n\n def __ne__(self, other: object) -> Self: # type: ignore[override]\n other = cast("PythonLiteral | ArrowSeries | None", other)\n ser, rhs = extract_native(self, other)\n return self._with_native(pc.not_equal(ser, rhs))\n\n def __ge__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return self._with_native(pc.greater_equal(ser, other))\n\n def __gt__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return self._with_native(pc.greater(ser, other))\n\n def __le__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return self._with_native(pc.less_equal(ser, other))\n\n def __lt__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return self._with_native(pc.less(ser, other))\n\n def __and__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return self._with_native(pc.and_kleene(ser, other)) # type: ignore[arg-type]\n\n def __rand__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return 
self._with_native(pc.and_kleene(other, ser)) # type: ignore[arg-type]\n\n def __or__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return self._with_native(pc.or_kleene(ser, other)) # type: ignore[arg-type]\n\n def __ror__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return self._with_native(pc.or_kleene(other, ser)) # type: ignore[arg-type]\n\n def __add__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return self._with_native(pc.add(ser, other))\n\n def __radd__(self, other: Any) -> Self:\n return self + other\n\n def __sub__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return self._with_native(pc.subtract(ser, other))\n\n def __rsub__(self, other: Any) -> Self:\n return (self - other) * (-1)\n\n def __mul__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return self._with_native(pc.multiply(ser, other))\n\n def __rmul__(self, other: Any) -> Self:\n return self * other\n\n def __pow__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return self._with_native(pc.power(ser, other))\n\n def __rpow__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return self._with_native(pc.power(other, ser))\n\n def __floordiv__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return self._with_native(floordiv_compat(ser, other))\n\n def __rfloordiv__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return self._with_native(floordiv_compat(other, ser))\n\n def __truediv__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return self._with_native(pc.divide(*cast_for_truediv(ser, other))) # type: ignore[type-var]\n\n def __rtruediv__(self, other: Any) -> Self:\n ser, other = extract_native(self, other)\n return self._with_native(pc.divide(*cast_for_truediv(other, ser))) # type: ignore[type-var]\n\n def __mod__(self, other: Any) -> Self:\n floor_div = (self // other).native\n ser, other = extract_native(self, other)\n res = pc.subtract(ser, pc.multiply(floor_div, other))\n return self._with_native(res)\n\n def __rmod__(self, other: Any) -> Self:\n floor_div = (other // self).native\n ser, other = extract_native(self, other)\n res = pc.subtract(other, pc.multiply(floor_div, ser))\n return self._with_native(res)\n\n def __invert__(self) -> Self:\n return self._with_native(pc.invert(self.native))\n\n @property\n def _type(self) -> pa.DataType:\n return self.native.type\n\n def len(self, *, _return_py_scalar: bool = True) -> int:\n return maybe_extract_py_scalar(len(self.native), _return_py_scalar)\n\n def filter(self, predicate: ArrowSeries | list[bool | None]) -> Self:\n other_native: Any\n if not is_list_of(predicate, bool):\n _, other_native = extract_native(self, predicate)\n else:\n other_native = predicate\n return self._with_native(self.native.filter(other_native))\n\n def mean(self, *, _return_py_scalar: bool = True) -> float:\n return maybe_extract_py_scalar(pc.mean(self.native), _return_py_scalar)\n\n def median(self, *, _return_py_scalar: bool = True) -> float:\n from narwhals.exceptions import InvalidOperationError\n\n if not self.dtype.is_numeric():\n msg = "`median` operation not supported for non-numeric input type."\n raise InvalidOperationError(msg)\n\n return maybe_extract_py_scalar(\n pc.approximate_median(self.native), _return_py_scalar\n )\n\n def min(self, *, _return_py_scalar: bool = True) -> Any:\n return maybe_extract_py_scalar(pc.min(self.native), 
_return_py_scalar)\n\n def max(self, *, _return_py_scalar: bool = True) -> Any:\n return maybe_extract_py_scalar(pc.max(self.native), _return_py_scalar)\n\n def arg_min(self, *, _return_py_scalar: bool = True) -> int:\n index_min = pc.index(self.native, pc.min(self.native))\n return maybe_extract_py_scalar(index_min, _return_py_scalar)\n\n def arg_max(self, *, _return_py_scalar: bool = True) -> int:\n index_max = pc.index(self.native, pc.max(self.native))\n return maybe_extract_py_scalar(index_max, _return_py_scalar)\n\n def sum(self, *, _return_py_scalar: bool = True) -> float:\n return maybe_extract_py_scalar(\n pc.sum(self.native, min_count=0), _return_py_scalar\n )\n\n def drop_nulls(self) -> Self:\n return self._with_native(self.native.drop_null())\n\n def shift(self, n: int) -> Self:\n if n > 0:\n arrays = [nulls_like(n, self), *self.native[:-n].chunks]\n elif n < 0:\n arrays = [*self.native[-n:].chunks, nulls_like(-n, self)]\n else:\n return self._with_native(self.native)\n return self._with_native(pa.concat_arrays(arrays))\n\n def std(self, ddof: int, *, _return_py_scalar: bool = True) -> float:\n return maybe_extract_py_scalar(\n pc.stddev(self.native, ddof=ddof), _return_py_scalar\n )\n\n def var(self, ddof: int, *, _return_py_scalar: bool = True) -> float:\n return maybe_extract_py_scalar(\n pc.variance(self.native, ddof=ddof), _return_py_scalar\n )\n\n def skew(self, *, _return_py_scalar: bool = True) -> float | None:\n ser_not_null = self.native.drop_null()\n if len(ser_not_null) == 0:\n return None\n elif len(ser_not_null) == 1:\n return float("nan")\n elif len(ser_not_null) == 2:\n return 0.0\n else:\n m = pc.subtract(ser_not_null, pc.mean(ser_not_null))\n m2 = pc.mean(pc.power(m, lit(2)))\n m3 = pc.mean(pc.power(m, lit(3)))\n biased_population_skewness = pc.divide(m3, pc.power(m2, lit(1.5)))\n return maybe_extract_py_scalar(biased_population_skewness, _return_py_scalar)\n\n def kurtosis(self, *, _return_py_scalar: bool = True) -> float | None:\n ser_not_null = self.native.drop_null()\n if len(ser_not_null) == 0:\n return None\n elif len(ser_not_null) == 1:\n return float("nan")\n else:\n m = pc.subtract(ser_not_null, pc.mean(ser_not_null))\n m2 = pc.mean(pc.power(m, lit(2)))\n m4 = pc.mean(pc.power(m, lit(4)))\n k = pc.subtract(pc.divide(m4, pc.power(m2, lit(2))), lit(3))\n return maybe_extract_py_scalar(k, _return_py_scalar)\n\n def count(self, *, _return_py_scalar: bool = True) -> int:\n return maybe_extract_py_scalar(pc.count(self.native), _return_py_scalar)\n\n def n_unique(self, *, _return_py_scalar: bool = True) -> int:\n return maybe_extract_py_scalar(\n pc.count(self.native.unique(), mode="all"), _return_py_scalar\n )\n\n def __native_namespace__(self) -> ModuleType:\n if self._implementation is Implementation.PYARROW:\n return self._implementation.to_native_namespace()\n\n msg = f"Expected pyarrow, got: {type(self._implementation)}" # pragma: no cover\n raise AssertionError(msg)\n\n @property\n def name(self) -> str:\n return self._name\n\n def _gather(self, rows: SizedMultiIndexSelector[ChunkedArrayAny]) -> Self:\n if len(rows) == 0:\n return self._with_native(self.native.slice(0, 0))\n if self._backend_version < (18,) and isinstance(rows, tuple):\n rows = list(rows)\n return self._with_native(self.native.take(rows))\n\n def _gather_slice(self, rows: _SliceIndex | range) -> Self:\n start = rows.start or 0\n stop = rows.stop if rows.stop is not None else len(self.native)\n if start < 0:\n start = len(self.native) + start\n if stop < 0:\n stop = len(self.native) + 
stop\n if rows.step is not None and rows.step != 1:\n msg = "Slicing with step is not supported on PyArrow tables"\n raise NotImplementedError(msg)\n return self._with_native(self.native.slice(start, stop - start))\n\n def scatter(self, indices: int | Sequence[int], values: Any) -> Self:\n import numpy as np # ignore-banned-import\n\n values_native: ArrayAny\n if isinstance(indices, int):\n indices_native = pa.array([indices])\n values_native = pa.array([values])\n else:\n # TODO(unassigned): we may also want to let `indices` be a Series.\n # https://github.com/narwhals-dev/narwhals/issues/2155\n indices_native = pa.array(indices)\n if isinstance(values, self.__class__):\n values_native = values.native.combine_chunks()\n else:\n # NOTE: Requires fixes in https://github.com/zen-xu/pyarrow-stubs/pull/209\n pa_array: Incomplete = pa.array\n values_native = pa_array(values)\n\n sorting_indices = pc.sort_indices(indices_native)\n indices_native = indices_native.take(sorting_indices)\n values_native = values_native.take(sorting_indices)\n\n mask: _1DArray = np.zeros(self.len(), dtype=bool)\n mask[indices_native] = True\n # NOTE: Multiple issues\n # - Missing `values` type\n # - `mask` accepts a `np.ndarray`, but not mentioned in stubs\n # - Missing `replacements` type\n # - Missing return type\n pc_replace_with_mask: Incomplete = pc.replace_with_mask\n return self._with_native(\n pc_replace_with_mask(self.native, mask, values_native.take(indices_native))\n )\n\n def to_list(self) -> list[Any]:\n return self.native.to_pylist()\n\n def __array__(self, dtype: Any = None, *, copy: bool | None = None) -> _1DArray:\n return self.native.__array__(dtype=dtype, copy=copy)\n\n def to_numpy(self, dtype: Any = None, *, copy: bool | None = None) -> _1DArray:\n return self.native.to_numpy()\n\n def alias(self, name: str) -> Self:\n result = self.__class__(\n self.native,\n name=name,\n backend_version=self._backend_version,\n version=self._version,\n )\n result._broadcast = self._broadcast\n return result\n\n @property\n def dtype(self) -> DType:\n return native_to_narwhals_dtype(self.native.type, self._version)\n\n def abs(self) -> Self:\n return self._with_native(pc.abs(self.native))\n\n def cum_sum(self, *, reverse: bool) -> Self:\n cum_sum = pc.cumulative_sum\n result = (\n cum_sum(self.native, skip_nulls=True)\n if not reverse\n else cum_sum(self.native[::-1], skip_nulls=True)[::-1]\n )\n return self._with_native(result)\n\n def round(self, decimals: int) -> Self:\n return self._with_native(\n pc.round(self.native, decimals, round_mode="half_towards_infinity")\n )\n\n def diff(self) -> Self:\n return self._with_native(pc.pairwise_diff(self.native.combine_chunks()))\n\n def any(self, *, _return_py_scalar: bool = True) -> bool:\n return maybe_extract_py_scalar(\n pc.any(self.native, min_count=0), _return_py_scalar\n )\n\n def all(self, *, _return_py_scalar: bool = True) -> bool:\n return maybe_extract_py_scalar(\n pc.all(self.native, min_count=0), _return_py_scalar\n )\n\n def is_between(\n self, lower_bound: Any, upper_bound: Any, closed: ClosedInterval\n ) -> Self:\n _, lower_bound = extract_native(self, lower_bound)\n _, upper_bound = extract_native(self, upper_bound)\n if closed == "left":\n ge = pc.greater_equal(self.native, lower_bound)\n lt = pc.less(self.native, upper_bound)\n res = pc.and_kleene(ge, lt)\n elif closed == "right":\n gt = pc.greater(self.native, lower_bound)\n le = pc.less_equal(self.native, upper_bound)\n res = pc.and_kleene(gt, le)\n elif closed == "none":\n gt = 
pc.greater(self.native, lower_bound)\n lt = pc.less(self.native, upper_bound)\n res = pc.and_kleene(gt, lt)\n elif closed == "both":\n ge = pc.greater_equal(self.native, lower_bound)\n le = pc.less_equal(self.native, upper_bound)\n res = pc.and_kleene(ge, le)\n else:\n assert_never(closed)\n return self._with_native(res)\n\n def is_null(self) -> Self:\n return self._with_native(self.native.is_null(), preserve_broadcast=True)\n\n def is_nan(self) -> Self:\n return self._with_native(pc.is_nan(self.native), preserve_broadcast=True)\n\n def cast(self, dtype: IntoDType) -> Self:\n data_type = narwhals_to_native_dtype(dtype, self._version)\n return self._with_native(pc.cast(self.native, data_type), preserve_broadcast=True)\n\n def null_count(self, *, _return_py_scalar: bool = True) -> int:\n return maybe_extract_py_scalar(self.native.null_count, _return_py_scalar)\n\n def head(self, n: int) -> Self:\n if n >= 0:\n return self._with_native(self.native.slice(0, n))\n else:\n num_rows = len(self)\n return self._with_native(self.native.slice(0, max(0, num_rows + n)))\n\n def tail(self, n: int) -> Self:\n if n >= 0:\n num_rows = len(self)\n return self._with_native(self.native.slice(max(0, num_rows - n)))\n else:\n return self._with_native(self.native.slice(abs(n)))\n\n def is_in(self, other: Any) -> Self:\n if self._is_native(other):\n value_set: ArrayOrChunkedArray = other\n else:\n value_set = pa.array(other)\n return self._with_native(pc.is_in(self.native, value_set=value_set))\n\n def arg_true(self) -> Self:\n import numpy as np # ignore-banned-import\n\n res = np.flatnonzero(self.native)\n return self.from_iterable(res, name=self.name, context=self)\n\n def item(self, index: int | None = None) -> Any:\n if index is None:\n if len(self) != 1:\n msg = (\n "can only call '.item()' if the Series is of length 1,"\n f" or an explicit index is provided (Series is of length {len(self)})"\n )\n raise ValueError(msg)\n return maybe_extract_py_scalar(self.native[0], return_py_scalar=True)\n return maybe_extract_py_scalar(self.native[index], return_py_scalar=True)\n\n def value_counts(\n self, *, sort: bool, parallel: bool, name: str | None, normalize: bool\n ) -> ArrowDataFrame:\n """Parallel is unused, exists for compatibility."""\n from narwhals._arrow.dataframe import ArrowDataFrame\n\n index_name_ = "index" if self._name is None else self._name\n value_name_ = name or ("proportion" if normalize else "count")\n\n val_counts = pc.value_counts(self.native)\n values = val_counts.field("values")\n counts = cast("ChunkedArrayAny", val_counts.field("counts"))\n\n if normalize:\n arrays = [values, pc.divide(*cast_for_truediv(counts, pc.sum(counts)))]\n else:\n arrays = [values, counts]\n\n val_count = pa.Table.from_arrays(arrays, names=[index_name_, value_name_])\n\n if sort:\n val_count = val_count.sort_by([(value_name_, "descending")])\n\n return ArrowDataFrame(\n val_count,\n backend_version=self._backend_version,\n version=self._version,\n validate_column_names=True,\n )\n\n def zip_with(self, mask: Self, other: Self) -> Self:\n cond = mask.native.combine_chunks()\n return self._with_native(pc.if_else(cond, self.native, other.native))\n\n def sample(\n self,\n n: int | None,\n *,\n fraction: float | None,\n with_replacement: bool,\n seed: int | None,\n ) -> Self:\n import numpy as np # ignore-banned-import\n\n num_rows = len(self)\n if n is None and fraction is not None:\n n = int(num_rows * fraction)\n\n rng = np.random.default_rng(seed=seed)\n idx = np.arange(num_rows)\n mask = rng.choice(idx, size=n, 
replace=with_replacement)\n return self._with_native(self.native.take(mask))\n\n def fill_null(\n self,\n value: Self | NonNestedLiteral,\n strategy: FillNullStrategy | None,\n limit: int | None,\n ) -> Self:\n import numpy as np # ignore-banned-import\n\n def fill_aux(\n arr: ChunkedArrayAny, limit: int, direction: FillNullStrategy | None\n ) -> ArrayAny:\n # this algorithm first finds the indices of the valid values to fill all the null value positions\n # then it calculates the distance of each new index and the original index\n # if the distance is equal to or less than the limit and the original value is null, it is replaced\n valid_mask = pc.is_valid(arr)\n indices = pa.array(np.arange(len(arr)), type=pa.int64())\n if direction == "forward":\n valid_index = np.maximum.accumulate(np.where(valid_mask, indices, -1))\n distance = indices - valid_index\n else:\n valid_index = np.minimum.accumulate(\n np.where(valid_mask[::-1], indices[::-1], len(arr))\n )[::-1]\n distance = valid_index - indices\n return pc.if_else(\n pc.and_(pc.is_null(arr), pc.less_equal(distance, lit(limit))), # pyright: ignore[reportArgumentType, reportCallIssue]\n arr.take(valid_index),\n arr,\n )\n\n if value is not None:\n _, native_value = extract_native(self, value)\n series: ArrayOrScalar = pc.fill_null(self.native, native_value)\n elif limit is None:\n fill_func = (\n pc.fill_null_forward if strategy == "forward" else pc.fill_null_backward\n )\n series = fill_func(self.native)\n else:\n series = fill_aux(self.native, limit, strategy)\n return self._with_native(series, preserve_broadcast=True)\n\n def to_frame(self) -> ArrowDataFrame:\n from narwhals._arrow.dataframe import ArrowDataFrame\n\n df = pa.Table.from_arrays([self.native], names=[self.name])\n return ArrowDataFrame(\n df,\n backend_version=self._backend_version,\n version=self._version,\n validate_column_names=False,\n )\n\n def to_pandas(self) -> pd.Series[Any]:\n import pandas as pd # ignore-banned-import()\n\n return pd.Series(self.native, name=self.name)\n\n def to_polars(self) -> pl.Series:\n import polars as pl # ignore-banned-import\n\n return cast("pl.Series", pl.from_arrow(self.native))\n\n def is_unique(self) -> ArrowSeries:\n return self.to_frame().is_unique().alias(self.name)\n\n def is_first_distinct(self) -> Self:\n import numpy as np # ignore-banned-import\n\n row_number = pa.array(np.arange(len(self)))\n col_token = generate_temporary_column_name(n_bytes=8, columns=[self.name])\n first_distinct_index = (\n pa.Table.from_arrays([self.native], names=[self.name])\n .append_column(col_token, row_number)\n .group_by(self.name)\n .aggregate([(col_token, "min")])\n .column(f"{col_token}_min")\n )\n\n return self._with_native(pc.is_in(row_number, first_distinct_index))\n\n def is_last_distinct(self) -> Self:\n import numpy as np # ignore-banned-import\n\n row_number = pa.array(np.arange(len(self)))\n col_token = generate_temporary_column_name(n_bytes=8, columns=[self.name])\n last_distinct_index = (\n pa.Table.from_arrays([self.native], names=[self.name])\n .append_column(col_token, row_number)\n .group_by(self.name)\n .aggregate([(col_token, "max")])\n .column(f"{col_token}_max")\n )\n\n return self._with_native(pc.is_in(row_number, last_distinct_index))\n\n def is_sorted(self, *, descending: bool) -> bool:\n if not isinstance(descending, bool):\n msg = f"argument 'descending' should be boolean, found {type(descending)}"\n raise TypeError(msg)\n if descending:\n result = pc.all(pc.greater_equal(self.native[:-1], self.native[1:]))\n else:\n 
result = pc.all(pc.less_equal(self.native[:-1], self.native[1:]))\n return maybe_extract_py_scalar(result, return_py_scalar=True)\n\n def unique(self, *, maintain_order: bool) -> Self:\n # TODO(marco): `pc.unique` seems to always maintain order, is that guaranteed?\n return self._with_native(self.native.unique())\n\n def replace_strict(\n self,\n old: Sequence[Any] | Mapping[Any, Any],\n new: Sequence[Any],\n *,\n return_dtype: IntoDType | None,\n ) -> Self:\n # https://stackoverflow.com/a/79111029/4451315\n idxs = pc.index_in(self.native, pa.array(old))\n result_native = pc.take(pa.array(new), idxs)\n if return_dtype is not None:\n result_native.cast(narwhals_to_native_dtype(return_dtype, self._version))\n result = self._with_native(result_native)\n if result.is_null().sum() != self.is_null().sum():\n msg = (\n "replace_strict did not replace all non-null values.\n\n"\n "The following did not get replaced: "\n f"{self.filter(~self.is_null() & result.is_null()).unique(maintain_order=False).to_list()}"\n )\n raise ValueError(msg)\n return result\n\n def sort(self, *, descending: bool, nulls_last: bool) -> Self:\n order: Order = "descending" if descending else "ascending"\n null_placement: NullPlacement = "at_end" if nulls_last else "at_start"\n sorted_indices = pc.array_sort_indices(\n self.native, order=order, null_placement=null_placement\n )\n return self._with_native(self.native.take(sorted_indices))\n\n def to_dummies(self, *, separator: str, drop_first: bool) -> ArrowDataFrame:\n import numpy as np # ignore-banned-import\n\n from narwhals._arrow.dataframe import ArrowDataFrame\n\n name = self._name\n # NOTE: stub is missing attributes (https://arrow.apache.org/docs/python/generated/pyarrow.DictionaryArray.html)\n da: Incomplete = self.native.combine_chunks().dictionary_encode("encode")\n\n columns: _2DArray = np.zeros((len(da.dictionary), len(da)), np.int8)\n columns[da.indices, np.arange(len(da))] = 1\n null_col_pa, null_col_pl = f"{name}{separator}None", f"{name}{separator}null"\n cols = [\n {null_col_pa: null_col_pl}.get(\n f"{name}{separator}{v}", f"{name}{separator}{v}"\n )\n for v in da.dictionary\n ]\n\n output_order = (\n [\n null_col_pl,\n *sorted([c for c in cols if c != null_col_pl])[int(drop_first) :],\n ]\n if null_col_pl in cols\n else sorted(cols)[int(drop_first) :]\n )\n return ArrowDataFrame(\n pa.Table.from_arrays(columns, names=cols),\n backend_version=self._backend_version,\n version=self._version,\n validate_column_names=True,\n ).simple_select(*output_order)\n\n def quantile(\n self,\n quantile: float,\n interpolation: RollingInterpolationMethod,\n *,\n _return_py_scalar: bool = True,\n ) -> float:\n return maybe_extract_py_scalar(\n pc.quantile(self.native, q=quantile, interpolation=interpolation)[0],\n _return_py_scalar,\n )\n\n def gather_every(self, n: int, offset: int = 0) -> Self:\n return self._with_native(self.native[offset::n])\n\n def clip(\n self,\n lower_bound: Self | NumericLiteral | TemporalLiteral | None,\n upper_bound: Self | NumericLiteral | TemporalLiteral | None,\n ) -> Self:\n _, lower = extract_native(self, lower_bound) if lower_bound else (None, None)\n _, upper = extract_native(self, upper_bound) if upper_bound else (None, None)\n\n if lower is None:\n return self._with_native(pc.min_element_wise(self.native, upper))\n if upper is None:\n return self._with_native(pc.max_element_wise(self.native, lower))\n return self._with_native(\n pc.max_element_wise(pc.min_element_wise(self.native, upper), lower)\n )\n\n def to_arrow(self) -> ArrayAny:\n 
return self.native.combine_chunks()\n\n def mode(self) -> ArrowSeries:\n plx = self.__narwhals_namespace__()\n col_token = generate_temporary_column_name(n_bytes=8, columns=[self.name])\n counts = self.value_counts(\n name=col_token, normalize=False, sort=False, parallel=False\n )\n return counts.filter(\n plx.col(col_token)\n == plx.col(col_token).max().broadcast(kind=ExprKind.AGGREGATION)\n ).get_column(self.name)\n\n def is_finite(self) -> Self:\n return self._with_native(pc.is_finite(self.native))\n\n def cum_count(self, *, reverse: bool) -> Self:\n dtypes = self._version.dtypes\n return (~self.is_null()).cast(dtypes.UInt32()).cum_sum(reverse=reverse)\n\n @requires.backend_version((13,))\n def cum_min(self, *, reverse: bool) -> Self:\n result = (\n pc.cumulative_min(self.native, skip_nulls=True)\n if not reverse\n else pc.cumulative_min(self.native[::-1], skip_nulls=True)[::-1]\n )\n return self._with_native(result)\n\n @requires.backend_version((13,))\n def cum_max(self, *, reverse: bool) -> Self:\n result = (\n pc.cumulative_max(self.native, skip_nulls=True)\n if not reverse\n else pc.cumulative_max(self.native[::-1], skip_nulls=True)[::-1]\n )\n return self._with_native(result)\n\n @requires.backend_version((13,))\n def cum_prod(self, *, reverse: bool) -> Self:\n result = (\n pc.cumulative_prod(self.native, skip_nulls=True)\n if not reverse\n else pc.cumulative_prod(self.native[::-1], skip_nulls=True)[::-1]\n )\n return self._with_native(result)\n\n def rolling_sum(self, window_size: int, *, min_samples: int, center: bool) -> Self:\n min_samples = min_samples if min_samples is not None else window_size\n padded_series, offset = pad_series(self, window_size=window_size, center=center)\n\n cum_sum = padded_series.cum_sum(reverse=False).fill_null(\n value=None, strategy="forward", limit=None\n )\n rolling_sum = (\n cum_sum\n - cum_sum.shift(window_size).fill_null(value=0, strategy=None, limit=None)\n if window_size != 0\n else cum_sum\n )\n\n valid_count = padded_series.cum_count(reverse=False)\n count_in_window = valid_count - valid_count.shift(window_size).fill_null(\n value=0, strategy=None, limit=None\n )\n\n result = self._with_native(\n pc.if_else((count_in_window >= min_samples).native, rolling_sum.native, None)\n )\n return result._gather_slice(slice(offset, None))\n\n def rolling_mean(self, window_size: int, *, min_samples: int, center: bool) -> Self:\n min_samples = min_samples if min_samples is not None else window_size\n padded_series, offset = pad_series(self, window_size=window_size, center=center)\n\n cum_sum = padded_series.cum_sum(reverse=False).fill_null(\n value=None, strategy="forward", limit=None\n )\n rolling_sum = (\n cum_sum\n - cum_sum.shift(window_size).fill_null(value=0, strategy=None, limit=None)\n if window_size != 0\n else cum_sum\n )\n\n valid_count = padded_series.cum_count(reverse=False)\n count_in_window = valid_count - valid_count.shift(window_size).fill_null(\n value=0, strategy=None, limit=None\n )\n\n result = (\n self._with_native(\n pc.if_else(\n (count_in_window >= min_samples).native, rolling_sum.native, None\n )\n )\n / count_in_window\n )\n return result._gather_slice(slice(offset, None))\n\n def rolling_var(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self:\n min_samples = min_samples if min_samples is not None else window_size\n padded_series, offset = pad_series(self, window_size=window_size, center=center)\n\n cum_sum = padded_series.cum_sum(reverse=False).fill_null(\n value=None, strategy="forward", 
limit=None\n )\n rolling_sum = (\n cum_sum\n - cum_sum.shift(window_size).fill_null(value=0, strategy=None, limit=None)\n if window_size != 0\n else cum_sum\n )\n\n cum_sum_sq = (\n pow(padded_series, 2)\n .cum_sum(reverse=False)\n .fill_null(value=None, strategy="forward", limit=None)\n )\n rolling_sum_sq = (\n cum_sum_sq\n - cum_sum_sq.shift(window_size).fill_null(value=0, strategy=None, limit=None)\n if window_size != 0\n else cum_sum_sq\n )\n\n valid_count = padded_series.cum_count(reverse=False)\n count_in_window = valid_count - valid_count.shift(window_size).fill_null(\n value=0, strategy=None, limit=None\n )\n\n result = self._with_native(\n pc.if_else(\n (count_in_window >= min_samples).native,\n (rolling_sum_sq - (rolling_sum**2 / count_in_window)).native,\n None,\n )\n ) / self._with_native(pc.max_element_wise((count_in_window - ddof).native, 0))\n\n return result._gather_slice(slice(offset, None, None))\n\n def rolling_std(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self:\n return (\n self.rolling_var(\n window_size=window_size, min_samples=min_samples, center=center, ddof=ddof\n )\n ** 0.5\n )\n\n def rank(self, method: RankMethod, *, descending: bool) -> Self:\n if method == "average":\n msg = (\n "`rank` with `method='average' is not supported for pyarrow backend. "\n "The available methods are {'min', 'max', 'dense', 'ordinal'}."\n )\n raise ValueError(msg)\n\n sort_keys: Order = "descending" if descending else "ascending"\n tiebreaker: TieBreaker = "first" if method == "ordinal" else method\n\n native_series: ArrayOrChunkedArray\n if self._backend_version < (14, 0, 0): # pragma: no cover\n native_series = self.native.combine_chunks()\n else:\n native_series = self.native\n\n null_mask = pc.is_null(native_series)\n\n rank = pc.rank(native_series, sort_keys=sort_keys, tiebreaker=tiebreaker)\n\n result = pc.if_else(null_mask, lit(None, rank.type), rank)\n return self._with_native(result)\n\n @requires.backend_version((13,))\n def hist( # noqa: C901, PLR0912, PLR0915\n self,\n bins: list[float | int] | None,\n *,\n bin_count: int | None,\n include_breakpoint: bool,\n ) -> ArrowDataFrame:\n import numpy as np # ignore-banned-import\n\n from narwhals._arrow.dataframe import ArrowDataFrame\n\n def _hist_from_bin_count(bin_count: int): # type: ignore[no-untyped-def] # noqa: ANN202\n d = pc.min_max(self.native)\n lower, upper = d["min"].as_py(), d["max"].as_py()\n if lower == upper:\n lower -= 0.5\n upper += 0.5\n bins = np.linspace(lower, upper, bin_count + 1)\n return _hist_from_bins(bins)\n\n def _hist_from_bins(bins: Sequence[int | float]): # type: ignore[no-untyped-def] # noqa: ANN202\n bin_indices = np.searchsorted(bins, self.native, side="left")\n bin_indices = pc.if_else( # lowest bin is inclusive\n pc.equal(self.native, lit(bins[0])), 1, bin_indices\n )\n\n # align unique categories and counts appropriately\n obs_cats, obs_counts = np.unique(bin_indices, return_counts=True)\n obj_cats = np.arange(1, len(bins))\n counts = np.zeros_like(obj_cats)\n counts[np.isin(obj_cats, obs_cats)] = obs_counts[np.isin(obs_cats, obj_cats)]\n\n bin_right = bins[1:]\n return counts, bin_right\n\n counts: Sequence[int | float | pa.Scalar[Any]] | np.typing.ArrayLike\n bin_right: Sequence[int | float | pa.Scalar[Any]] | np.typing.ArrayLike\n\n data_count = pc.sum(\n pc.invert(pc.or_(pc.is_nan(self.native), pc.is_null(self.native))).cast(\n pa.uint8()\n ),\n min_count=0,\n )\n if bins is not None:\n if len(bins) < 2:\n counts, bin_right = [], []\n\n elif 
data_count == pa.scalar(0, type=pa.uint64()): # type:ignore[comparison-overlap]\n counts = np.zeros(len(bins) - 1)\n bin_right = bins[1:]\n\n elif len(bins) == 2:\n counts = [\n pc.sum(\n pc.and_(\n pc.greater_equal(self.native, lit(float(bins[0]))),\n pc.less_equal(self.native, lit(float(bins[1]))),\n ).cast(pa.uint8())\n )\n ]\n bin_right = [bins[-1]]\n else:\n counts, bin_right = _hist_from_bins(bins)\n\n elif bin_count is not None:\n if bin_count == 0:\n counts, bin_right = [], []\n elif data_count == pa.scalar(0, type=pa.uint64()): # type:ignore[comparison-overlap]\n counts, bin_right = (\n np.zeros(bin_count),\n np.linspace(0, 1, bin_count + 1)[1:],\n )\n elif bin_count == 1:\n d = pc.min_max(self.native)\n lower, upper = d["min"], d["max"]\n if lower == upper:\n counts, bin_right = [data_count], [pc.add(upper, pa.scalar(0.5))]\n else:\n counts, bin_right = [data_count], [upper]\n else:\n counts, bin_right = _hist_from_bin_count(bin_count)\n\n else: # pragma: no cover\n # caller guarantees that either bins or bin_count is specified\n msg = "must provide one of `bin_count` or `bins`"\n raise InvalidOperationError(msg)\n\n data: dict[str, Any] = {}\n if include_breakpoint:\n data["breakpoint"] = bin_right\n data["count"] = counts\n\n return ArrowDataFrame(\n pa.Table.from_pydict(data),\n backend_version=self._backend_version,\n version=self._version,\n validate_column_names=True,\n )\n\n def __iter__(self) -> Iterator[Any]:\n for x in self.native:\n yield maybe_extract_py_scalar(x, return_py_scalar=True)\n\n def __contains__(self, other: Any) -> bool:\n from pyarrow import (\n ArrowInvalid, # ignore-banned-imports\n ArrowNotImplementedError, # ignore-banned-imports\n ArrowTypeError, # ignore-banned-imports\n )\n\n try:\n other_ = lit(other) if other is not None else lit(None, type=self._type)\n return maybe_extract_py_scalar(\n pc.is_in(other_, self.native), return_py_scalar=True\n )\n except (ArrowInvalid, ArrowNotImplementedError, ArrowTypeError) as exc:\n from narwhals.exceptions import InvalidOperationError\n\n msg = f"Unable to compare other of type {type(other)} with series of type {self.dtype}."\n raise InvalidOperationError(msg) from exc\n\n def log(self, base: float) -> Self:\n return self._with_native(pc.logb(self.native, lit(base)))\n\n def exp(self) -> Self:\n return self._with_native(pc.exp(self.native))\n\n def sqrt(self) -> Self:\n return self._with_native(pc.sqrt(self.native))\n\n @property\n def dt(self) -> ArrowSeriesDateTimeNamespace:\n return ArrowSeriesDateTimeNamespace(self)\n\n @property\n def cat(self) -> ArrowSeriesCatNamespace:\n return ArrowSeriesCatNamespace(self)\n\n @property\n def str(self) -> ArrowSeriesStringNamespace:\n return ArrowSeriesStringNamespace(self)\n\n @property\n def list(self) -> ArrowSeriesListNamespace:\n return ArrowSeriesListNamespace(self)\n\n @property\n def struct(self) -> ArrowSeriesStructNamespace:\n return ArrowSeriesStructNamespace(self)\n\n ewm_mean = not_implemented()\n
.venv\Lib\site-packages\narwhals\_arrow\series.py
series.py
Python
44,983
0.95
0.177539
0.025717
awesome-app
706
2024-02-26T13:32:15.425471
Apache-2.0
false
0dcf5d59ec0f7a8f677a4eee3ddc59d9
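Not part of the dumped file: a minimal standalone sketch, assuming only `pyarrow` and `numpy`, of the limit-aware forward-fill that `ArrowSeries.fill_null` implements above via its inner `fill_aux` helper; the sample values are invented.

```python
# Sketch of fill_null(strategy="forward", limit=...) from the file above:
# find the index of the most recent valid value for each position, then
# only fill nulls whose distance to that value is within the limit.
import numpy as np
import pyarrow as pa
import pyarrow.compute as pc

arr = pa.array([1, None, None, None, 5])  # invented sample data
limit = 2

valid_mask = pc.is_valid(arr)
indices = np.arange(len(arr))
# most recent valid index at or before each position (-1 if none yet)
valid_index = np.maximum.accumulate(np.where(valid_mask, indices, -1))
distance = indices - valid_index

filled = pc.if_else(
    pc.and_(pc.is_null(arr), pc.less_equal(pa.array(distance), pa.scalar(limit))),
    arr.take(pa.array(valid_index)),
    arr,
)
print(filled.to_pylist())  # [1, 1, 1, None, 5]
```

The third null sits 3 positions after the last valid value, so with `limit=2` it stays null; the backward strategy in the file is the same computation run on the reversed array.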
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport pyarrow as pa\n\nfrom narwhals._arrow.utils import ArrowSeriesNamespace\n\nif TYPE_CHECKING:\n from narwhals._arrow.series import ArrowSeries\n from narwhals._arrow.typing import Incomplete\n\n\nclass ArrowSeriesCatNamespace(ArrowSeriesNamespace):\n def get_categories(self) -> ArrowSeries:\n # NOTE: Should be `list[pa.DictionaryArray]`, but `DictionaryArray` has no attributes\n chunks: Incomplete = self.native.chunks\n return self.with_native(pa.concat_arrays(x.dictionary for x in chunks).unique())\n
.venv\Lib\site-packages\narwhals\_arrow\series_cat.py
series_cat.py
Python
598
0.95
0.222222
0.083333
awesome-app
678
2023-09-02T08:25:16.127028
MIT
false
e5673404000d0b0fbd3c88750d5bed4f
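Not part of the dumped file: a small sketch, assuming pyarrow's `dictionary_encode`, of what `get_categories` above does with the per-chunk `.dictionary` arrays; the sample strings are invented.

```python
# Each chunk of a dictionary (categorical) array carries its categories in
# `.dictionary`; concatenating those and de-duplicating yields all categories.
import pyarrow as pa

chunked = pa.chunked_array(
    [
        pa.array(["a", "b", "a"]).dictionary_encode(),
        pa.array(["b", "c"]).dictionary_encode(),
    ]
)
categories = pa.concat_arrays(c.dictionary for c in chunked.chunks).unique()
print(categories.to_pylist())  # ['a', 'b', 'c']
```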
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Callable, ClassVar, cast\n\nimport pyarrow as pa\nimport pyarrow.compute as pc\n\nfrom narwhals._arrow.utils import UNITS_DICT, ArrowSeriesNamespace, floordiv_compat, lit\nfrom narwhals._constants import (\n MS_PER_MINUTE,\n MS_PER_SECOND,\n NS_PER_MICROSECOND,\n NS_PER_MILLISECOND,\n NS_PER_MINUTE,\n NS_PER_SECOND,\n SECONDS_PER_DAY,\n SECONDS_PER_MINUTE,\n US_PER_MINUTE,\n US_PER_SECOND,\n)\nfrom narwhals._duration import parse_interval_string\n\nif TYPE_CHECKING:\n from collections.abc import Mapping\n\n from typing_extensions import TypeAlias\n\n from narwhals._arrow.series import ArrowSeries\n from narwhals._arrow.typing import ChunkedArrayAny, ScalarAny\n from narwhals.dtypes import Datetime\n from narwhals.typing import TimeUnit\n\n UnitCurrent: TypeAlias = TimeUnit\n UnitTarget: TypeAlias = TimeUnit\n BinOpBroadcast: TypeAlias = Callable[[ChunkedArrayAny, ScalarAny], ChunkedArrayAny]\n IntoRhs: TypeAlias = int\n\n\nclass ArrowSeriesDateTimeNamespace(ArrowSeriesNamespace):\n _TIMESTAMP_DATE_FACTOR: ClassVar[Mapping[TimeUnit, int]] = {\n "ns": NS_PER_SECOND,\n "us": US_PER_SECOND,\n "ms": MS_PER_SECOND,\n "s": 1,\n }\n _TIMESTAMP_DATETIME_OP_FACTOR: ClassVar[\n Mapping[tuple[UnitCurrent, UnitTarget], tuple[BinOpBroadcast, IntoRhs]]\n ] = {\n ("ns", "us"): (floordiv_compat, 1_000),\n ("ns", "ms"): (floordiv_compat, 1_000_000),\n ("us", "ns"): (pc.multiply, NS_PER_MICROSECOND),\n ("us", "ms"): (floordiv_compat, 1_000),\n ("ms", "ns"): (pc.multiply, NS_PER_MILLISECOND),\n ("ms", "us"): (pc.multiply, 1_000),\n ("s", "ns"): (pc.multiply, NS_PER_SECOND),\n ("s", "us"): (pc.multiply, US_PER_SECOND),\n ("s", "ms"): (pc.multiply, MS_PER_SECOND),\n }\n\n @property\n def unit(self) -> TimeUnit: # NOTE: Unsafe (native).\n return cast("pa.TimestampType[TimeUnit, Any]", self.native.type).unit\n\n @property\n def time_zone(self) -> str | None: # NOTE: Unsafe (narwhals).\n return cast("Datetime", self.compliant.dtype).time_zone\n\n def to_string(self, format: str) -> ArrowSeries:\n # PyArrow differs from other libraries in that %S also prints out\n # the fractional part of the second...:'(\n # https://arrow.apache.org/docs/python/generated/pyarrow.compute.strftime.html\n format = format.replace("%S.%f", "%S").replace("%S%.f", "%S")\n return self.with_native(pc.strftime(self.native, format))\n\n def replace_time_zone(self, time_zone: str | None) -> ArrowSeries:\n if time_zone is not None:\n result = pc.assume_timezone(pc.local_timestamp(self.native), time_zone)\n else:\n result = pc.local_timestamp(self.native)\n return self.with_native(result)\n\n def convert_time_zone(self, time_zone: str) -> ArrowSeries:\n ser = self.replace_time_zone("UTC") if self.time_zone is None else self.compliant\n return self.with_native(ser.native.cast(pa.timestamp(self.unit, time_zone)))\n\n def timestamp(self, time_unit: TimeUnit) -> ArrowSeries:\n ser = self.compliant\n dtypes = ser._version.dtypes\n if isinstance(ser.dtype, dtypes.Datetime):\n current = ser.dtype.time_unit\n s_cast = self.native.cast(pa.int64())\n if current == time_unit:\n result = s_cast\n elif item := self._TIMESTAMP_DATETIME_OP_FACTOR.get((current, time_unit)):\n fn, factor = item\n result = fn(s_cast, lit(factor))\n else: # pragma: no cover\n msg = f"unexpected time unit {current}, please report an issue at https://github.com/narwhals-dev/narwhals"\n raise AssertionError(msg)\n return self.with_native(result)\n elif isinstance(ser.dtype, dtypes.Date):\n time_s = 
pc.multiply(self.native.cast(pa.int32()), lit(SECONDS_PER_DAY))\n factor = self._TIMESTAMP_DATE_FACTOR[time_unit]\n return self.with_native(pc.multiply(time_s, lit(factor)))\n else:\n msg = "Input should be either of Date or Datetime type"\n raise TypeError(msg)\n\n def date(self) -> ArrowSeries:\n return self.with_native(self.native.cast(pa.date32()))\n\n def year(self) -> ArrowSeries:\n return self.with_native(pc.year(self.native))\n\n def month(self) -> ArrowSeries:\n return self.with_native(pc.month(self.native))\n\n def day(self) -> ArrowSeries:\n return self.with_native(pc.day(self.native))\n\n def hour(self) -> ArrowSeries:\n return self.with_native(pc.hour(self.native))\n\n def minute(self) -> ArrowSeries:\n return self.with_native(pc.minute(self.native))\n\n def second(self) -> ArrowSeries:\n return self.with_native(pc.second(self.native))\n\n def millisecond(self) -> ArrowSeries:\n return self.with_native(pc.millisecond(self.native))\n\n def microsecond(self) -> ArrowSeries:\n arr = self.native\n result = pc.add(pc.multiply(pc.millisecond(arr), lit(1000)), pc.microsecond(arr))\n return self.with_native(result)\n\n def nanosecond(self) -> ArrowSeries:\n result = pc.add(\n pc.multiply(self.microsecond().native, lit(1000)), pc.nanosecond(self.native)\n )\n return self.with_native(result)\n\n def ordinal_day(self) -> ArrowSeries:\n return self.with_native(pc.day_of_year(self.native))\n\n def weekday(self) -> ArrowSeries:\n return self.with_native(pc.day_of_week(self.native, count_from_zero=False))\n\n def total_minutes(self) -> ArrowSeries:\n unit_to_minutes_factor = {\n "s": SECONDS_PER_MINUTE,\n "ms": MS_PER_MINUTE,\n "us": US_PER_MINUTE,\n "ns": NS_PER_MINUTE,\n }\n factor = lit(unit_to_minutes_factor[self.unit], type=pa.int64())\n return self.with_native(pc.divide(self.native, factor).cast(pa.int64()))\n\n def total_seconds(self) -> ArrowSeries:\n unit_to_seconds_factor = {\n "s": 1,\n "ms": MS_PER_SECOND,\n "us": US_PER_SECOND,\n "ns": NS_PER_SECOND,\n }\n factor = lit(unit_to_seconds_factor[self.unit], type=pa.int64())\n return self.with_native(pc.divide(self.native, factor).cast(pa.int64()))\n\n def total_milliseconds(self) -> ArrowSeries:\n unit_to_milli_factor = {\n "s": 1e3, # seconds\n "ms": 1, # milli\n "us": 1e3, # micro\n "ns": 1e6, # nano\n }\n factor = lit(unit_to_milli_factor[self.unit], type=pa.int64())\n if self.unit == "s":\n return self.with_native(pc.multiply(self.native, factor).cast(pa.int64()))\n return self.with_native(pc.divide(self.native, factor).cast(pa.int64()))\n\n def total_microseconds(self) -> ArrowSeries:\n unit_to_micro_factor = {\n "s": 1e6, # seconds\n "ms": 1e3, # milli\n "us": 1, # micro\n "ns": 1e3, # nano\n }\n factor = lit(unit_to_micro_factor[self.unit], type=pa.int64())\n if self.unit in {"s", "ms"}:\n return self.with_native(pc.multiply(self.native, factor).cast(pa.int64()))\n return self.with_native(pc.divide(self.native, factor).cast(pa.int64()))\n\n def total_nanoseconds(self) -> ArrowSeries:\n unit_to_nano_factor = {\n "s": NS_PER_SECOND,\n "ms": NS_PER_MILLISECOND,\n "us": NS_PER_MICROSECOND,\n "ns": 1,\n }\n factor = lit(unit_to_nano_factor[self.unit], type=pa.int64())\n return self.with_native(pc.multiply(self.native, factor).cast(pa.int64()))\n\n def truncate(self, every: str) -> ArrowSeries:\n multiple, unit = parse_interval_string(every)\n return self.with_native(\n pc.floor_temporal(self.native, multiple=multiple, unit=UNITS_DICT[unit])\n )\n
.venv\Lib\site-packages\narwhals\_arrow\series_dt.py
series_dt.py
Python
7,956
0.95
0.153846
0.017143
node-utils
459
2023-10-10T23:56:17.564485
BSD-3-Clause
false
c5aab4c12198b1abc2e48996784b2ec9
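Not part of the dumped file: a sketch of the unit-conversion scheme behind the `_TIMESTAMP_*` factor tables above, assuming only `pyarrow`; the timestamp value is invented.

```python
# The factor tables boil down to: view a timestamp as int64, then multiply
# to reach a finer unit or (floor-)divide to reach a coarser one.
import pyarrow as pa
import pyarrow.compute as pc

ts = pa.array([1_700_000_000_123_456], type=pa.timestamp("us"))  # invented
as_int = ts.cast(pa.int64())

us_to_ns = pc.multiply(as_int, pa.scalar(1_000))  # finer unit: multiply
us_to_ms = pc.divide(as_int, pa.scalar(1_000))    # coarser unit: divide
# (the module uses floordiv_compat rather than pc.divide so that negative,
# pre-epoch values floor instead of truncating toward zero)
print(us_to_ns[0].as_py(), us_to_ms[0].as_py())
```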
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport pyarrow as pa\nimport pyarrow.compute as pc\n\nfrom narwhals._arrow.utils import ArrowSeriesNamespace\n\nif TYPE_CHECKING:\n from narwhals._arrow.series import ArrowSeries\n\n\nclass ArrowSeriesListNamespace(ArrowSeriesNamespace):\n def len(self) -> ArrowSeries:\n return self.with_native(pc.list_value_length(self.native).cast(pa.uint32()))\n
.venv\Lib\site-packages\narwhals\_arrow\series_list.py
series_list.py
Python
421
0.85
0.1875
0
vue-tools
201
2024-08-31T23:57:40.931732
GPL-3.0
false
273f5e014324309edfa5b14c13a733c4
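Not part of the dumped file: a one-liner sketch of the `len` method above, assuming only `pyarrow`; the sample rows are invented.

```python
# pc.list_value_length gives the per-row element count of a list array
# (null rows stay null); the namespace above casts the result to uint32.
import pyarrow as pa
import pyarrow.compute as pc

arr = pa.array([[1, 2], [], None])  # invented sample data
print(pc.list_value_length(arr).cast(pa.uint32()).to_pylist())  # [2, 0, None]
```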
from __future__ import annotations\n\nimport string\nfrom typing import TYPE_CHECKING\n\nimport pyarrow as pa\nimport pyarrow.compute as pc\n\nfrom narwhals._arrow.utils import ArrowSeriesNamespace, lit, parse_datetime_format\n\nif TYPE_CHECKING:\n from narwhals._arrow.series import ArrowSeries\n from narwhals._arrow.typing import Incomplete\n\n\nclass ArrowSeriesStringNamespace(ArrowSeriesNamespace):\n def len_chars(self) -> ArrowSeries:\n return self.with_native(pc.utf8_length(self.native))\n\n def replace(self, pattern: str, value: str, *, literal: bool, n: int) -> ArrowSeries:\n fn = pc.replace_substring if literal else pc.replace_substring_regex\n arr = fn(self.native, pattern, replacement=value, max_replacements=n)\n return self.with_native(arr)\n\n def replace_all(self, pattern: str, value: str, *, literal: bool) -> ArrowSeries:\n return self.replace(pattern, value, literal=literal, n=-1)\n\n def strip_chars(self, characters: str | None) -> ArrowSeries:\n return self.with_native(\n pc.utf8_trim(self.native, characters or string.whitespace)\n )\n\n def starts_with(self, prefix: str) -> ArrowSeries:\n return self.with_native(pc.equal(self.slice(0, len(prefix)).native, lit(prefix)))\n\n def ends_with(self, suffix: str) -> ArrowSeries:\n return self.with_native(\n pc.equal(self.slice(-len(suffix), None).native, lit(suffix))\n )\n\n def contains(self, pattern: str, *, literal: bool) -> ArrowSeries:\n check_func = pc.match_substring if literal else pc.match_substring_regex\n return self.with_native(check_func(self.native, pattern))\n\n def slice(self, offset: int, length: int | None) -> ArrowSeries:\n stop = offset + length if length is not None else None\n return self.with_native(\n pc.utf8_slice_codeunits(self.native, start=offset, stop=stop)\n )\n\n def split(self, by: str) -> ArrowSeries:\n split_series = pc.split_pattern(self.native, by) # type: ignore[call-overload]\n return self.with_native(split_series)\n\n def to_datetime(self, format: str | None) -> ArrowSeries:\n format = parse_datetime_format(self.native) if format is None else format\n timestamp_array = pc.strptime(self.native, format=format, unit="us")\n return self.with_native(timestamp_array)\n\n def to_date(self, format: str | None) -> ArrowSeries:\n return self.to_datetime(format=format).dt.date()\n\n def to_uppercase(self) -> ArrowSeries:\n return self.with_native(pc.utf8_upper(self.native))\n\n def to_lowercase(self) -> ArrowSeries:\n return self.with_native(pc.utf8_lower(self.native))\n\n def zfill(self, width: int) -> ArrowSeries:\n binary_join: Incomplete = pc.binary_join_element_wise\n native = self.native\n hyphen, plus = lit("-"), lit("+")\n\n _slice_length: int | None = (\n self.len_chars().max()\n if self._compliant_series._backend_version < (13, 0)\n else None\n )\n first_char, remaining_chars = (\n self.slice(0, 1).native,\n self.slice(1, _slice_length).native,\n )\n\n # Conditions\n less_than_width = pc.less(pc.utf8_length(native), lit(width))\n starts_with_hyphen = pc.equal(first_char, hyphen)\n starts_with_plus = pc.equal(first_char, plus)\n\n conditions = pc.make_struct(\n pc.and_(starts_with_hyphen, less_than_width),\n pc.and_(starts_with_plus, less_than_width),\n less_than_width,\n )\n\n # Cases\n padded_remaining_chars = pc.utf8_lpad(remaining_chars, width - 1, padding="0")\n\n result = pc.case_when(\n conditions,\n binary_join(\n pa.repeat(hyphen, len(native)), padded_remaining_chars, ""\n ), # starts with hyphen and less than width\n binary_join(\n pa.repeat(plus, len(native)), 
padded_remaining_chars, ""\n ), # starts with plus and less than width\n pc.utf8_lpad(native, width=width, padding="0"), # less than width\n native,\n )\n return self.with_native(result)\n
.venv\Lib\site-packages\narwhals\_arrow\series_str.py
series_str.py
Python
4,178
0.95
0.192661
0.023529
react-lib
419
2024-05-08T07:25:13.793741
GPL-3.0
false
9f6d49eada6f650a7ae2396e175470eb
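Not part of the dumped file: a short sketch, with invented sample strings, of why the `zfill` above needs its sign-aware `case_when` instead of a plain left-pad.

```python
# A plain left-pad puts the zeros before the sign, whereas Python's
# str.zfill (the behaviour zfill above reproduces) pads after it.
import pyarrow as pa
import pyarrow.compute as pc

arr = pa.array(["-7", "42"])  # invented sample data
print(pc.utf8_lpad(arr, 4, padding="0").to_pylist())  # ['00-7', '0042']
print([s.zfill(4) for s in ["-7", "42"]])             # ['-007', '0042']
```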
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport pyarrow.compute as pc\n\nfrom narwhals._arrow.utils import ArrowSeriesNamespace\n\nif TYPE_CHECKING:\n from narwhals._arrow.series import ArrowSeries\n\n\nclass ArrowSeriesStructNamespace(ArrowSeriesNamespace):\n def field(self, name: str) -> ArrowSeries:\n return self.with_native(pc.struct_field(self.native, name)).alias(name)\n
.venv\Lib\site-packages\narwhals\_arrow\series_struct.py
series_struct.py
Python
410
0.85
0.2
0
node-utils
311
2024-03-09T13:10:38.140931
BSD-3-Clause
false
e0db1b9b169a772585e1fc98149e8bfe
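Not part of the dumped file: a minimal sketch of the `field` method above, assuming only `pyarrow`; the struct values are invented.

```python
# pc.struct_field pulls one child array out of a struct column; the
# namespace above then renames the resulting series to the field name.
import pyarrow as pa
import pyarrow.compute as pc

arr = pa.array([{"x": 1, "y": "a"}, {"x": 2, "y": "b"}])  # invented
print(pc.struct_field(arr, "x").to_pylist())  # [1, 2]
```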
from __future__ import annotations # pragma: no cover\n\nfrom typing import (\n TYPE_CHECKING, # pragma: no cover\n Any, # pragma: no cover\n TypeVar, # pragma: no cover\n)\n\nif TYPE_CHECKING:\n import sys\n from typing import Generic, Literal\n\n if sys.version_info >= (3, 10):\n from typing import TypeAlias\n else:\n from typing_extensions import TypeAlias\n\n import pyarrow as pa\n from pyarrow.__lib_pxi.table import (\n AggregateOptions, # noqa: F401\n Aggregation, # noqa: F401\n )\n from pyarrow._stubs_typing import ( # pyright: ignore[reportMissingModuleSource]\n Indices, # noqa: F401\n Mask, # noqa: F401\n Order, # noqa: F401\n )\n\n from narwhals._arrow.expr import ArrowExpr\n from narwhals._arrow.series import ArrowSeries\n\n IntoArrowExpr: TypeAlias = "ArrowExpr | ArrowSeries"\n TieBreaker: TypeAlias = Literal["min", "max", "first", "dense"]\n NullPlacement: TypeAlias = Literal["at_start", "at_end"]\n NativeIntervalUnit: TypeAlias = Literal[\n "year",\n "quarter",\n "month",\n "week",\n "day",\n "hour",\n "minute",\n "second",\n "millisecond",\n "microsecond",\n "nanosecond",\n ]\n\n ChunkedArrayAny: TypeAlias = pa.ChunkedArray[Any]\n ArrayAny: TypeAlias = pa.Array[Any]\n ArrayOrChunkedArray: TypeAlias = "ArrayAny | ChunkedArrayAny"\n ScalarAny: TypeAlias = pa.Scalar[Any]\n ArrayOrScalar: TypeAlias = "ArrayOrChunkedArray | ScalarAny"\n ArrayOrScalarT1 = TypeVar("ArrayOrScalarT1", ArrayAny, ChunkedArrayAny, ScalarAny)\n ArrayOrScalarT2 = TypeVar("ArrayOrScalarT2", ArrayAny, ChunkedArrayAny, ScalarAny)\n _AsPyType = TypeVar("_AsPyType")\n\n class _BasicDataType(pa.DataType, Generic[_AsPyType]): ...\n\n\nIncomplete: TypeAlias = Any # pragma: no cover\n"""\nMarker for working code that fails on the stubs.\n\nCommon issues:\n- Annotated for `Array`, but not `ChunkedArray`\n- Relies on typing information that the stubs don't provide statically\n- Missing attributes\n- Incorrect return types\n- Inconsistent use of generic/concrete types\n- `_clone_signature` used on signatures that are not identical\n"""\n
.venv\Lib\site-packages\narwhals\_arrow\typing.py
typing.py
Python
2,286
0.95
0.069444
0
vue-tools
649
2024-07-07T21:55:13.405500
Apache-2.0
false
80783f9281f5cfa54ab01cedc417ef3f
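Not part of the dumped file: a sketch of how the `Incomplete = Any` marker documented above is used at call sites (see e.g. `pc.replace_with_mask` in series.py earlier); the sample arrays are invented.

```python
# Rebind an API whose stubs are unreliable as Any, so the call
# type-checks while runtime behaviour is unchanged.
from typing import Any

import pyarrow as pa
import pyarrow.compute as pc

Incomplete = Any

pc_replace_with_mask: Incomplete = pc.replace_with_mask  # stubs incomplete
arr = pa.array([1, 2, 3])
mask = pa.array([False, True, False])
print(pc_replace_with_mask(arr, mask, pa.array([99])).to_pylist())  # [1, 99, 3]
```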
from __future__ import annotations\n\nfrom functools import lru_cache\nfrom typing import TYPE_CHECKING, Any, cast\n\nimport pyarrow as pa\nimport pyarrow.compute as pc\n\nfrom narwhals._compliant.series import _SeriesNamespace\nfrom narwhals._utils import isinstance_or_issubclass\n\nif TYPE_CHECKING:\n from collections.abc import Iterable, Iterator, Mapping\n\n from typing_extensions import TypeAlias, TypeIs\n\n from narwhals._arrow.series import ArrowSeries\n from narwhals._arrow.typing import (\n ArrayAny,\n ArrayOrScalar,\n ArrayOrScalarT1,\n ArrayOrScalarT2,\n ChunkedArrayAny,\n NativeIntervalUnit,\n ScalarAny,\n )\n from narwhals._duration import IntervalUnit\n from narwhals._utils import Version\n from narwhals.dtypes import DType\n from narwhals.typing import IntoDType, PythonLiteral\n\n # NOTE: stubs don't allow for `ChunkedArray[StructArray]`\n # Intended to represent the `.chunks` property storing `list[pa.StructArray]`\n ChunkedArrayStructArray: TypeAlias = ChunkedArrayAny\n\n def is_timestamp(t: Any) -> TypeIs[pa.TimestampType[Any, Any]]: ...\n def is_duration(t: Any) -> TypeIs[pa.DurationType[Any]]: ...\n def is_list(t: Any) -> TypeIs[pa.ListType[Any]]: ...\n def is_large_list(t: Any) -> TypeIs[pa.LargeListType[Any]]: ...\n def is_fixed_size_list(t: Any) -> TypeIs[pa.FixedSizeListType[Any, Any]]: ...\n def is_dictionary(t: Any) -> TypeIs[pa.DictionaryType[Any, Any, Any]]: ...\n def extract_regex(\n strings: ChunkedArrayAny,\n /,\n pattern: str,\n *,\n options: Any = None,\n memory_pool: Any = None,\n ) -> ChunkedArrayStructArray: ...\nelse:\n from pyarrow.compute import extract_regex\n from pyarrow.types import (\n is_dictionary, # noqa: F401\n is_duration,\n is_fixed_size_list,\n is_large_list,\n is_list,\n is_timestamp,\n )\n\nUNITS_DICT: Mapping[IntervalUnit, NativeIntervalUnit] = {\n "y": "year",\n "q": "quarter",\n "mo": "month",\n "d": "day",\n "h": "hour",\n "m": "minute",\n "s": "second",\n "ms": "millisecond",\n "us": "microsecond",\n "ns": "nanosecond",\n}\n\nlit = pa.scalar\n"""Alias for `pyarrow.scalar`."""\n\n\ndef extract_py_scalar(value: Any, /) -> Any:\n from narwhals._arrow.series import maybe_extract_py_scalar\n\n return maybe_extract_py_scalar(value, return_py_scalar=True)\n\n\ndef chunked_array(\n arr: ArrayOrScalar | list[Iterable[Any]], dtype: pa.DataType | None = None, /\n) -> ChunkedArrayAny:\n if isinstance(arr, pa.ChunkedArray):\n return arr\n if isinstance(arr, list):\n return pa.chunked_array(arr, dtype)\n else:\n return pa.chunked_array([arr], arr.type)\n\n\ndef nulls_like(n: int, series: ArrowSeries) -> ArrayAny:\n """Create a strongly-typed Array instance with all elements null.\n\n Uses the type of `series`, without upseting `mypy`.\n """\n return pa.nulls(n, series.native.type)\n\n\n@lru_cache(maxsize=16)\ndef native_to_narwhals_dtype(dtype: pa.DataType, version: Version) -> DType: # noqa: C901, PLR0912\n dtypes = version.dtypes\n if pa.types.is_int64(dtype):\n return dtypes.Int64()\n if pa.types.is_int32(dtype):\n return dtypes.Int32()\n if pa.types.is_int16(dtype):\n return dtypes.Int16()\n if pa.types.is_int8(dtype):\n return dtypes.Int8()\n if pa.types.is_uint64(dtype):\n return dtypes.UInt64()\n if pa.types.is_uint32(dtype):\n return dtypes.UInt32()\n if pa.types.is_uint16(dtype):\n return dtypes.UInt16()\n if pa.types.is_uint8(dtype):\n return dtypes.UInt8()\n if pa.types.is_boolean(dtype):\n return dtypes.Boolean()\n if pa.types.is_float64(dtype):\n return dtypes.Float64()\n if pa.types.is_float32(dtype):\n return dtypes.Float32()\n # 
bug in coverage? it shows `31->exit` (where `31` is currently the line number of\n # the next line), even though both when the if condition is true and false are covered\n if ( # pragma: no cover\n pa.types.is_string(dtype)\n or pa.types.is_large_string(dtype)\n or getattr(pa.types, "is_string_view", lambda _: False)(dtype)\n ):\n return dtypes.String()\n if pa.types.is_date32(dtype):\n return dtypes.Date()\n if is_timestamp(dtype):\n return dtypes.Datetime(time_unit=dtype.unit, time_zone=dtype.tz)\n if is_duration(dtype):\n return dtypes.Duration(time_unit=dtype.unit)\n if pa.types.is_dictionary(dtype):\n return dtypes.Categorical()\n if pa.types.is_struct(dtype):\n return dtypes.Struct(\n [\n dtypes.Field(\n dtype.field(i).name,\n native_to_narwhals_dtype(dtype.field(i).type, version),\n )\n for i in range(dtype.num_fields)\n ]\n )\n if is_list(dtype) or is_large_list(dtype):\n return dtypes.List(native_to_narwhals_dtype(dtype.value_type, version))\n if is_fixed_size_list(dtype):\n return dtypes.Array(\n native_to_narwhals_dtype(dtype.value_type, version), dtype.list_size\n )\n if pa.types.is_decimal(dtype):\n return dtypes.Decimal()\n if pa.types.is_time32(dtype) or pa.types.is_time64(dtype):\n return dtypes.Time()\n if pa.types.is_binary(dtype):\n return dtypes.Binary()\n return dtypes.Unknown() # pragma: no cover\n\n\ndef narwhals_to_native_dtype(dtype: IntoDType, version: Version) -> pa.DataType: # noqa: C901, PLR0912\n dtypes = version.dtypes\n if isinstance_or_issubclass(dtype, dtypes.Decimal):\n msg = "Casting to Decimal is not supported yet."\n raise NotImplementedError(msg)\n if isinstance_or_issubclass(dtype, dtypes.Float64):\n return pa.float64()\n if isinstance_or_issubclass(dtype, dtypes.Float32):\n return pa.float32()\n if isinstance_or_issubclass(dtype, dtypes.Int64):\n return pa.int64()\n if isinstance_or_issubclass(dtype, dtypes.Int32):\n return pa.int32()\n if isinstance_or_issubclass(dtype, dtypes.Int16):\n return pa.int16()\n if isinstance_or_issubclass(dtype, dtypes.Int8):\n return pa.int8()\n if isinstance_or_issubclass(dtype, dtypes.UInt64):\n return pa.uint64()\n if isinstance_or_issubclass(dtype, dtypes.UInt32):\n return pa.uint32()\n if isinstance_or_issubclass(dtype, dtypes.UInt16):\n return pa.uint16()\n if isinstance_or_issubclass(dtype, dtypes.UInt8):\n return pa.uint8()\n if isinstance_or_issubclass(dtype, dtypes.String):\n return pa.string()\n if isinstance_or_issubclass(dtype, dtypes.Boolean):\n return pa.bool_()\n if isinstance_or_issubclass(dtype, dtypes.Categorical):\n return pa.dictionary(pa.uint32(), pa.string())\n if isinstance_or_issubclass(dtype, dtypes.Datetime):\n unit = dtype.time_unit\n return pa.timestamp(unit, tz) if (tz := dtype.time_zone) else pa.timestamp(unit)\n if isinstance_or_issubclass(dtype, dtypes.Duration):\n return pa.duration(dtype.time_unit)\n if isinstance_or_issubclass(dtype, dtypes.Date):\n return pa.date32()\n if isinstance_or_issubclass(dtype, dtypes.List):\n return pa.list_(value_type=narwhals_to_native_dtype(dtype.inner, version=version))\n if isinstance_or_issubclass(dtype, dtypes.Struct):\n return pa.struct(\n [\n (field.name, narwhals_to_native_dtype(field.dtype, version=version))\n for field in dtype.fields\n ]\n )\n if isinstance_or_issubclass(dtype, dtypes.Array): # pragma: no cover\n inner = narwhals_to_native_dtype(dtype.inner, version=version)\n list_size = dtype.size\n return pa.list_(inner, list_size=list_size)\n if isinstance_or_issubclass(dtype, dtypes.Time):\n return pa.time64("ns")\n if 
isinstance_or_issubclass(dtype, dtypes.Binary):\n return pa.binary()\n\n msg = f"Unknown dtype: {dtype}" # pragma: no cover\n raise AssertionError(msg)\n\n\ndef extract_native(\n lhs: ArrowSeries, rhs: ArrowSeries | PythonLiteral | ScalarAny\n) -> tuple[ChunkedArrayAny | ScalarAny, ChunkedArrayAny | ScalarAny]:\n """Extract native objects in binary operation.\n\n If the comparison isn't supported, return `NotImplemented` so that the\n "right-hand-side" operation (e.g. `__radd__`) can be tried.\n\n If one of the two sides has a `_broadcast` flag, then extract the scalar\n underneath it so that PyArrow can do its own broadcasting.\n """\n from narwhals._arrow.series import ArrowSeries\n\n if rhs is None: # pragma: no cover\n return lhs.native, lit(None, type=lhs._type)\n\n if isinstance(rhs, ArrowSeries):\n if lhs._broadcast and not rhs._broadcast:\n return lhs.native[0], rhs.native\n if rhs._broadcast:\n return lhs.native, rhs.native[0]\n return lhs.native, rhs.native\n\n if isinstance(rhs, list):\n msg = "Expected Series or scalar, got list."\n raise TypeError(msg)\n\n return lhs.native, rhs if isinstance(rhs, pa.Scalar) else lit(rhs)\n\n\ndef floordiv_compat(left: ArrayOrScalar, right: ArrayOrScalar, /) -> Any:\n # The following lines are adapted from pandas' pyarrow implementation.\n # Ref: https://github.com/pandas-dev/pandas/blob/262fcfbffcee5c3116e86a951d8b693f90411e68/pandas/core/arrays/arrow/array.py#L124-L154\n\n if pa.types.is_integer(left.type) and pa.types.is_integer(right.type):\n divided = pc.divide_checked(left, right)\n # TODO @dangotbanned: Use a `TypeVar` in guards\n # Narrowing to a `Union` isn't interacting well with the rest of the stubs\n # https://github.com/zen-xu/pyarrow-stubs/pull/215\n if pa.types.is_signed_integer(divided.type):\n div_type = cast("pa._lib.Int64Type", divided.type)\n has_remainder = pc.not_equal(pc.multiply(divided, right), left)\n has_one_negative_operand = pc.less(\n pc.bit_wise_xor(left, right), lit(0, div_type)\n )\n result = pc.if_else(\n pc.and_(has_remainder, has_one_negative_operand),\n pc.subtract(divided, lit(1, div_type)),\n divided,\n )\n else:\n result = divided # pragma: no cover\n result = result.cast(left.type)\n else:\n divided = pc.divide(left, right)\n result = pc.floor(divided)\n return result\n\n\ndef cast_for_truediv(\n arrow_array: ArrayOrScalarT1, pa_object: ArrayOrScalarT2\n) -> tuple[ArrayOrScalarT1, ArrayOrScalarT2]:\n # Lifted from:\n # https://github.com/pandas-dev/pandas/blob/262fcfbffcee5c3116e86a951d8b693f90411e68/pandas/core/arrays/arrow/array.py#L108-L122\n # Ensure int / int -> float mirroring Python/Numpy behavior\n # as pc.divide_checked(int, int) -> int\n if pa.types.is_integer(arrow_array.type) and pa.types.is_integer(pa_object.type):\n # GH: 56645. 
# noqa: ERA001\n # https://github.com/apache/arrow/issues/35563\n # NOTE: `pyarrow==11.*` doesn't allow keywords in `Array.cast`\n return pc.cast(arrow_array, pa.float64(), safe=False), pc.cast(\n pa_object, pa.float64(), safe=False\n )\n\n return arrow_array, pa_object\n\n\n# Regex for date, time, separator and timezone components\nDATE_RE = r"(?P<date>\d{1,4}[-/.]\d{1,2}[-/.]\d{1,4}|\d{8})"\nSEP_RE = r"(?P<sep>\s|T)"\nTIME_RE = r"(?P<time>\d{2}:\d{2}(?::\d{2})?|\d{6}?)" # \s*(?P<period>[AP]M)?)?\nHMS_RE = r"^(?P<hms>\d{2}:\d{2}:\d{2})$"\nHM_RE = r"^(?P<hm>\d{2}:\d{2})$"\nHMS_RE_NO_SEP = r"^(?P<hms_no_sep>\d{6})$"\nTZ_RE = r"(?P<tz>Z|[+-]\d{2}:?\d{2})" # Matches 'Z', '+02:00', '+0200', '+02', etc.\nFULL_RE = rf"{DATE_RE}{SEP_RE}?{TIME_RE}?{TZ_RE}?$"\n\n# Separate regexes for different date formats\nYMD_RE = r"^(?P<year>(?:[12][0-9])?[0-9]{2})(?P<sep1>[-/.])(?P<month>0[1-9]|1[0-2])(?P<sep2>[-/.])(?P<day>0[1-9]|[12][0-9]|3[01])$"\nDMY_RE = r"^(?P<day>0[1-9]|[12][0-9]|3[01])(?P<sep1>[-/.])(?P<month>0[1-9]|1[0-2])(?P<sep2>[-/.])(?P<year>(?:[12][0-9])?[0-9]{2})$"\nMDY_RE = r"^(?P<month>0[1-9]|1[0-2])(?P<sep1>[-/.])(?P<day>0[1-9]|[12][0-9]|3[01])(?P<sep2>[-/.])(?P<year>(?:[12][0-9])?[0-9]{2})$"\nYMD_RE_NO_SEP = r"^(?P<year>(?:[12][0-9])?[0-9]{2})(?P<month>0[1-9]|1[0-2])(?P<day>0[1-9]|[12][0-9]|3[01])$"\n\nDATE_FORMATS = (\n (YMD_RE_NO_SEP, "%Y%m%d"),\n (YMD_RE, "%Y-%m-%d"),\n (DMY_RE, "%d-%m-%Y"),\n (MDY_RE, "%m-%d-%Y"),\n)\nTIME_FORMATS = ((HMS_RE, "%H:%M:%S"), (HM_RE, "%H:%M"), (HMS_RE_NO_SEP, "%H%M%S"))\n\n\ndef _extract_regex_concat_arrays(\n strings: ChunkedArrayAny,\n /,\n pattern: str,\n *,\n options: Any = None,\n memory_pool: Any = None,\n) -> pa.StructArray:\n r = pa.concat_arrays(\n extract_regex(strings, pattern, options=options, memory_pool=memory_pool).chunks\n )\n return cast("pa.StructArray", r)\n\n\ndef parse_datetime_format(arr: ChunkedArrayAny) -> str:\n """Try to infer datetime format from StringArray."""\n matches = _extract_regex_concat_arrays(arr.drop_null().slice(0, 10), pattern=FULL_RE)\n if not pc.all(matches.is_valid()).as_py():\n msg = (\n "Unable to infer datetime format, provided format is not supported. 
"\n "Please report a bug to https://github.com/narwhals-dev/narwhals/issues"\n )\n raise NotImplementedError(msg)\n\n separators = matches.field("sep")\n tz = matches.field("tz")\n\n # separators and time zones must be unique\n if pc.count(pc.unique(separators)).as_py() > 1:\n msg = "Found multiple separator values while inferring datetime format."\n raise ValueError(msg)\n\n if pc.count(pc.unique(tz)).as_py() > 1:\n msg = "Found multiple timezone values while inferring datetime format."\n raise ValueError(msg)\n\n date_value = _parse_date_format(cast("pc.StringArray", matches.field("date")))\n time_value = _parse_time_format(cast("pc.StringArray", matches.field("time")))\n\n sep_value = separators[0].as_py()\n tz_value = "%z" if tz[0].as_py() else ""\n\n return f"{date_value}{sep_value}{time_value}{tz_value}"\n\n\ndef _parse_date_format(arr: pc.StringArray) -> str:\n for date_rgx, date_fmt in DATE_FORMATS:\n matches = pc.extract_regex(arr, pattern=date_rgx)\n if date_fmt == "%Y%m%d" and pc.all(matches.is_valid()).as_py():\n return date_fmt\n elif (\n pc.all(matches.is_valid()).as_py()\n and pc.count(pc.unique(sep1 := matches.field("sep1"))).as_py() == 1\n and pc.count(pc.unique(sep2 := matches.field("sep2"))).as_py() == 1\n and (date_sep_value := sep1[0].as_py()) == sep2[0].as_py()\n ):\n return date_fmt.replace("-", date_sep_value)\n\n msg = (\n "Unable to infer datetime format. "\n "Please report a bug to https://github.com/narwhals-dev/narwhals/issues"\n )\n raise ValueError(msg)\n\n\ndef _parse_time_format(arr: pc.StringArray) -> str:\n for time_rgx, time_fmt in TIME_FORMATS:\n matches = pc.extract_regex(arr, pattern=time_rgx)\n if pc.all(matches.is_valid()).as_py():\n return time_fmt\n return ""\n\n\ndef pad_series(\n series: ArrowSeries, *, window_size: int, center: bool\n) -> tuple[ArrowSeries, int]:\n """Pad series with None values on the left and/or right side, depending on the specified parameters.\n\n Arguments:\n series: The input ArrowSeries to be padded.\n window_size: The desired size of the window.\n center: Specifies whether to center the padding or not.\n\n Returns:\n A tuple containing the padded ArrowSeries and the offset value.\n """\n if not center:\n return series, 0\n offset_left = window_size // 2\n # subtract one if window_size is even\n offset_right = offset_left - (window_size % 2 == 0)\n pad_left = pa.array([None] * offset_left, type=series._type)\n pad_right = pa.array([None] * offset_right, type=series._type)\n concat = pa.concat_arrays([pad_left, *series.native.chunks, pad_right])\n return series._with_native(concat), offset_left + offset_right\n\n\ndef cast_to_comparable_string_types(\n *chunked_arrays: ChunkedArrayAny, separator: str\n) -> tuple[Iterator[ChunkedArrayAny], ScalarAny]:\n # Ensure `chunked_arrays` are either all `string` or all `large_string`.\n dtype = (\n pa.string() # (PyArrow default)\n if not any(pa.types.is_large_string(ca.type) for ca in chunked_arrays)\n else pa.large_string()\n )\n return (ca.cast(dtype) for ca in chunked_arrays), lit(separator, dtype)\n\n\nclass ArrowSeriesNamespace(_SeriesNamespace["ArrowSeries", "ChunkedArrayAny"]):\n def __init__(self, series: ArrowSeries, /) -> None:\n self._compliant_series = series\n
.venv\Lib\site-packages\narwhals\_arrow\utils.py
utils.py
Python
16,640
0.95
0.230769
0.063492
awesome-app
233
2025-01-16T14:02:46.744879
Apache-2.0
false
73810b511ef5480e75234df7cb7b9f80
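The two division helpers in the module above exist because PyArrow's integer kernels follow C-style semantics rather than Python's. A minimal sketch (illustrative only, not part of the dumped module) of both pitfalls, assuming only pyarrow is installed:

import pyarrow as pa
import pyarrow.compute as pc

left = pa.chunked_array([[-7, 7, -7, 7]])
right = pa.chunked_array([[2, 2, -2, -2]])

# pc.divide_checked truncates toward zero, so `floordiv_compat` must
# subtract 1 whenever there is a remainder and exactly one negative operand.
print(pc.divide_checked(left, right).to_pylist())                      # [-3, 3, 3, -3]
print([a // b for a, b in zip(left.to_pylist(), right.to_pylist())])   # [-4, 3, 3, -4]

# int / int stays integral unless cast first, which is what
# `cast_for_truediv` does before a true division.
ints, twos = pa.array([1, 3]), pa.array([2, 2])
print(pc.divide(ints, twos).to_pylist())                               # [0, 1]
print(pc.divide(pc.cast(ints, pa.float64(), safe=False),
                pc.cast(twos, pa.float64(), safe=False)).to_pylist())  # [0.5, 1.5]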
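The format-inference helpers rely on `pyarrow.compute.extract_regex`, which returns one struct per input string; `_extract_regex_concat_arrays` flattens the chunked result so the named groups can be inspected directly. A small sketch of that flow (the sample strings and simplified pattern are made up):

import pyarrow as pa
import pyarrow.compute as pc

strings = pa.chunked_array([["2024-01-02T10:30:00"], ["2024-01-03 11:45:00"]])
pattern = r"(?P<date>\d{4}-\d{2}-\d{2})(?P<sep>\s|T)(?P<time>\d{2}:\d{2}:\d{2})"
matches = pc.extract_regex(strings, pattern=pattern)

# Concatenate the chunks into a single StructArray, mirroring
# `_extract_regex_concat_arrays`, then inspect the named groups.
struct = pa.concat_arrays(matches.chunks)
print(struct.field("sep").to_pylist())   # ['T', ' ']

# `parse_datetime_format` rejects mixed separators with exactly this check:
print(pc.count(pc.unique(struct.field("sep"))).as_py())  # 2 -> would raise ValueError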
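The offset arithmetic in `pad_series` for a centered rolling window can also be seen in isolation: for an even `window_size`, one fewer null goes on the right. A standalone sketch with made-up values:

import pyarrow as pa

window_size = 4
values = pa.chunked_array([[1, 2, 3]])
offset_left = window_size // 2                       # 2
offset_right = offset_left - (window_size % 2 == 0)  # 1: even window pads one less on the right
pad_left = pa.array([None] * offset_left, type=pa.int64())
pad_right = pa.array([None] * offset_right, type=pa.int64())
padded = pa.concat_arrays([pad_left, *values.chunks, pad_right])
print(padded.to_pylist())  # [None, None, 1, 2, 3, None]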
\n\n
.venv\Lib\site-packages\narwhals\_arrow\__pycache__\dataframe.cpython-313.pyc
dataframe.cpython-313.pyc
Other
39,580
0.95
0.01548
0.009868
vue-tools
913
2024-06-06T11:53:44.518531
BSD-3-Clause
false
1a80123d09b32af5ebc82d2606221c81
\n\n
.venv\Lib\site-packages\narwhals\_arrow\__pycache__\expr.cpython-313.pyc
expr.cpython-313.pyc
Other
10,122
0.95
0.010309
0
node-utils
47
2024-12-02T12:46:25.183934
Apache-2.0
false
413ccd33af12e227fa617efff03623ec
\n\n
.venv\Lib\site-packages\narwhals\_arrow\__pycache__\group_by.cpython-313.pyc
group_by.cpython-313.pyc
Other
8,718
0.95
0
0.009091
awesome-app
559
2024-02-08T02:23:40.372433
GPL-3.0
false
5f651981eac559f926e82415405fbbdf
\n\n
.venv\Lib\site-packages\narwhals\_arrow\__pycache__\namespace.cpython-313.pyc
namespace.cpython-313.pyc
Other
19,233
0.95
0
0.009709
node-utils
458
2025-06-12T01:06:01.892895
MIT
false
82315dd0ad34204d273a363c29baa6e5
\n\n
.venv\Lib\site-packages\narwhals\_arrow\__pycache__\selectors.cpython-313.pyc
selectors.cpython-313.pyc
Other
1,843
0.95
0
0
awesome-app
673
2025-04-10T18:41:39.034089
MIT
false
53bbdbafe4c7c94825550046ac5ff5be
\n\n
.venv\Lib\site-packages\narwhals\_arrow\__pycache__\series.cpython-313.pyc
series.cpython-313.pyc
Other
65,577
0.75
0.007435
0.007797
python-kit
905
2024-11-15T02:16:38.505918
Apache-2.0
false
4fa3b4e20db782c9891d6c3d4a54b3f5
\n\n
.venv\Lib\site-packages\narwhals\_arrow\__pycache__\series_cat.cpython-313.pyc
series_cat.cpython-313.pyc
Other
1,428
0.8
0
0
vue-tools
225
2024-04-08T01:38:15.018857
BSD-3-Clause
false
34c5c2f3f97699abbc450638260a2ba0
\n\n
.venv\Lib\site-packages\narwhals\_arrow\__pycache__\series_dt.cpython-313.pyc
series_dt.cpython-313.pyc
Other
14,307
0.8
0
0.013333
react-lib
146
2024-11-01T10:16:11.373995
BSD-3-Clause
false
975e01c7732069604344cba1728cb082
\n\n
.venv\Lib\site-packages\narwhals\_arrow\__pycache__\series_list.cpython-313.pyc
series_list.cpython-313.pyc
Other
1,164
0.7
0
0
react-lib
108
2025-02-01T13:48:25.407515
MIT
false
e5051eacf35493c4920cc9c1aa39b92e
\n\n
.venv\Lib\site-packages\narwhals\_arrow\__pycache__\series_str.cpython-313.pyc
series_str.cpython-313.pyc
Other
7,628
0.8
0
0
vue-tools
240
2024-06-20T05:51:16.006325
Apache-2.0
false
34dc5083c1f4642cc6723b3e68e4fa80
\n\n
.venv\Lib\site-packages\narwhals\_arrow\__pycache__\series_struct.cpython-313.pyc
series_struct.cpython-313.pyc
Other
1,108
0.7
0
0
python-kit
359
2023-08-31T15:17:47.686379
BSD-3-Clause
false
8bb6cb71671cddc84227f87e9ab4e8eb
\n\n
.venv\Lib\site-packages\narwhals\_arrow\__pycache__\typing.cpython-313.pyc
typing.cpython-313.pyc
Other
2,190
0.8
0
0
python-kit
201
2025-07-07T17:36:12.700180
GPL-3.0
false
7ca01017c4c9d9d9ba42bd0bf749c18c
\n\n
.venv\Lib\site-packages\narwhals\_arrow\__pycache__\utils.cpython-313.pyc
utils.cpython-313.pyc
Other
25,002
0.8
0.011236
0.005882
react-lib
589
2024-02-07T11:04:33.877929
BSD-3-Clause
false
c7f3811415a9320015308b2dfe25259a
\n\n
.venv\Lib\site-packages\narwhals\_arrow\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
190
0.7
0
0
awesome-app
808
2023-10-10T02:59:08.811058
BSD-3-Clause
false
f4a1f2fd7048b2a186e15ce423887b63
"""`Expr` and `Series` namespace accessor protocols."""\n\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Protocol\n\nfrom narwhals._utils import CompliantT_co, _StoresCompliant\n\nif TYPE_CHECKING:\n from typing import Callable\n\n from narwhals.typing import TimeUnit\n\n__all__ = [\n "CatNamespace",\n "DateTimeNamespace",\n "ListNamespace",\n "NameNamespace",\n "StringNamespace",\n "StructNamespace",\n]\n\n\nclass CatNamespace(_StoresCompliant[CompliantT_co], Protocol[CompliantT_co]):\n def get_categories(self) -> CompliantT_co: ...\n\n\nclass DateTimeNamespace(_StoresCompliant[CompliantT_co], Protocol[CompliantT_co]):\n def to_string(self, format: str) -> CompliantT_co: ...\n def replace_time_zone(self, time_zone: str | None) -> CompliantT_co: ...\n def convert_time_zone(self, time_zone: str) -> CompliantT_co: ...\n def timestamp(self, time_unit: TimeUnit) -> CompliantT_co: ...\n def date(self) -> CompliantT_co: ...\n def year(self) -> CompliantT_co: ...\n def month(self) -> CompliantT_co: ...\n def day(self) -> CompliantT_co: ...\n def hour(self) -> CompliantT_co: ...\n def minute(self) -> CompliantT_co: ...\n def second(self) -> CompliantT_co: ...\n def millisecond(self) -> CompliantT_co: ...\n def microsecond(self) -> CompliantT_co: ...\n def nanosecond(self) -> CompliantT_co: ...\n def ordinal_day(self) -> CompliantT_co: ...\n def weekday(self) -> CompliantT_co: ...\n def total_minutes(self) -> CompliantT_co: ...\n def total_seconds(self) -> CompliantT_co: ...\n def total_milliseconds(self) -> CompliantT_co: ...\n def total_microseconds(self) -> CompliantT_co: ...\n def total_nanoseconds(self) -> CompliantT_co: ...\n def truncate(self, every: str) -> CompliantT_co: ...\n\n\nclass ListNamespace(_StoresCompliant[CompliantT_co], Protocol[CompliantT_co]):\n def len(self) -> CompliantT_co: ...\n\n\nclass NameNamespace(_StoresCompliant[CompliantT_co], Protocol[CompliantT_co]):\n def keep(self) -> CompliantT_co: ...\n def map(self, function: Callable[[str], str]) -> CompliantT_co: ...\n def prefix(self, prefix: str) -> CompliantT_co: ...\n def suffix(self, suffix: str) -> CompliantT_co: ...\n def to_lowercase(self) -> CompliantT_co: ...\n def to_uppercase(self) -> CompliantT_co: ...\n\n\nclass StringNamespace(_StoresCompliant[CompliantT_co], Protocol[CompliantT_co]):\n def len_chars(self) -> CompliantT_co: ...\n def replace(\n self, pattern: str, value: str, *, literal: bool, n: int\n ) -> CompliantT_co: ...\n def replace_all(\n self, pattern: str, value: str, *, literal: bool\n ) -> CompliantT_co: ...\n def strip_chars(self, characters: str | None) -> CompliantT_co: ...\n def starts_with(self, prefix: str) -> CompliantT_co: ...\n def ends_with(self, suffix: str) -> CompliantT_co: ...\n def contains(self, pattern: str, *, literal: bool) -> CompliantT_co: ...\n def slice(self, offset: int, length: int | None) -> CompliantT_co: ...\n def split(self, by: str) -> CompliantT_co: ...\n def to_datetime(self, format: str | None) -> CompliantT_co: ...\n def to_date(self, format: str | None) -> CompliantT_co: ...\n def to_lowercase(self) -> CompliantT_co: ...\n def to_uppercase(self) -> CompliantT_co: ...\n def zfill(self, width: int) -> CompliantT_co: ...\n\n\nclass StructNamespace(_StoresCompliant[CompliantT_co], Protocol[CompliantT_co]):\n def field(self, name: str) -> CompliantT_co: ...\n
.venv\Lib\site-packages\narwhals\_compliant\any_namespace.py
any_namespace.py
Python
3,459
0.85
0.602273
0
node-utils
637
2025-03-19T07:28:45.127876
BSD-3-Clause
false
e91a5b2f1eac62d7206e3522b486a2eb
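Because these accessors are `Protocol` classes, any object with matching method signatures satisfies them structurally; no inheritance from narwhals is required. A toy sketch of that idea (all names below are illustrative, and the real protocols also mix in `_StoresCompliant`):

from typing import Protocol, TypeVar, runtime_checkable

T_co = TypeVar("T_co", covariant=True)

@runtime_checkable
class ToyListNamespace(Protocol[T_co]):
    def len(self) -> T_co: ...

class MySeries:
    def __init__(self, values): self.values = values

class MyListAccessor:
    """Structurally compatible with ToyListNamespace."""
    def __init__(self, series): self._series = series
    def len(self) -> "MySeries":
        return MySeries([len(v) for v in self._series.values])

acc = MyListAccessor(MySeries([[1, 2], [3]]))
print(isinstance(acc, ToyListNamespace))  # True: structural match only
print(acc.len().values)                   # [2, 1]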
from __future__ import annotations\n\nfrom collections.abc import Iterator, Mapping, Sequence, Sized\nfrom itertools import chain\nfrom typing import TYPE_CHECKING, Any, Literal, Protocol, TypeVar, overload\n\nfrom narwhals._compliant.typing import (\n CompliantDataFrameAny,\n CompliantExprT_contra,\n CompliantLazyFrameAny,\n CompliantSeriesT,\n EagerExprT,\n EagerSeriesT,\n NativeExprT,\n NativeFrameT,\n NativeSeriesT,\n)\nfrom narwhals._translate import (\n ArrowConvertible,\n DictConvertible,\n FromNative,\n NumpyConvertible,\n ToNarwhals,\n ToNarwhalsT_co,\n)\nfrom narwhals._typing_compat import assert_never, deprecated\nfrom narwhals._utils import (\n Version,\n _StoresNative,\n check_columns_exist,\n is_compliant_series,\n is_index_selector,\n is_range,\n is_sequence_like,\n is_sized_multi_index_selector,\n is_slice_index,\n is_slice_none,\n)\n\nif TYPE_CHECKING:\n from io import BytesIO\n from pathlib import Path\n\n import pandas as pd\n import polars as pl\n import pyarrow as pa\n from typing_extensions import Self, TypeAlias\n\n from narwhals._compliant.expr import LazyExpr\n from narwhals._compliant.group_by import CompliantGroupBy, DataFrameGroupBy\n from narwhals._compliant.namespace import EagerNamespace\n from narwhals._compliant.window import WindowInputs\n from narwhals._translate import IntoArrowTable\n from narwhals._utils import Implementation, _FullContext\n from narwhals.dataframe import DataFrame\n from narwhals.dtypes import DType\n from narwhals.exceptions import ColumnNotFoundError\n from narwhals.schema import Schema\n from narwhals.typing import (\n AsofJoinStrategy,\n JoinStrategy,\n LazyUniqueKeepStrategy,\n MultiColSelector,\n MultiIndexSelector,\n PivotAgg,\n SingleIndexSelector,\n SizedMultiIndexSelector,\n SizedMultiNameSelector,\n SizeUnit,\n UniqueKeepStrategy,\n _2DArray,\n _SliceIndex,\n _SliceName,\n )\n\n Incomplete: TypeAlias = Any\n\n__all__ = ["CompliantDataFrame", "CompliantLazyFrame", "EagerDataFrame"]\n\nT = TypeVar("T")\n\n_ToDict: TypeAlias = "dict[str, CompliantSeriesT] | dict[str, list[Any]]" # noqa: PYI047\n\n\nclass CompliantDataFrame(\n NumpyConvertible["_2DArray", "_2DArray"],\n DictConvertible["_ToDict[CompliantSeriesT]", Mapping[str, Any]],\n ArrowConvertible["pa.Table", "IntoArrowTable"],\n _StoresNative[NativeFrameT],\n FromNative[NativeFrameT],\n ToNarwhals[ToNarwhalsT_co],\n Sized,\n Protocol[CompliantSeriesT, CompliantExprT_contra, NativeFrameT, ToNarwhalsT_co],\n):\n _native_frame: NativeFrameT\n _implementation: Implementation\n _backend_version: tuple[int, ...]\n _version: Version\n\n def __narwhals_dataframe__(self) -> Self: ...\n def __narwhals_namespace__(self) -> Any: ...\n @classmethod\n def from_arrow(cls, data: IntoArrowTable, /, *, context: _FullContext) -> Self: ...\n @classmethod\n def from_dict(\n cls,\n data: Mapping[str, Any],\n /,\n *,\n context: _FullContext,\n schema: Mapping[str, DType] | Schema | None,\n ) -> Self: ...\n @classmethod\n def from_native(cls, data: NativeFrameT, /, *, context: _FullContext) -> Self: ...\n @classmethod\n def from_numpy(\n cls,\n data: _2DArray,\n /,\n *,\n context: _FullContext,\n schema: Mapping[str, DType] | Schema | Sequence[str] | None,\n ) -> Self: ...\n\n def __array__(self, dtype: Any, *, copy: bool | None) -> _2DArray: ...\n def __getitem__(\n self,\n item: tuple[\n SingleIndexSelector | MultiIndexSelector[CompliantSeriesT],\n MultiColSelector[CompliantSeriesT],\n ],\n ) -> Self: ...\n def simple_select(self, *column_names: str) -> Self:\n """`select` where all args are 
column names."""\n ...\n\n def aggregate(self, *exprs: CompliantExprT_contra) -> Self:\n """`select` where all args are aggregations or literals.\n\n (so, no broadcasting is necessary).\n """\n # NOTE: Ignore is to avoid an intermittent false positive\n return self.select(*exprs) # pyright: ignore[reportArgumentType]\n\n def _with_version(self, version: Version) -> Self: ...\n\n @property\n def native(self) -> NativeFrameT:\n return self._native_frame\n\n @property\n def columns(self) -> Sequence[str]: ...\n @property\n def schema(self) -> Mapping[str, DType]: ...\n @property\n def shape(self) -> tuple[int, int]: ...\n def clone(self) -> Self: ...\n def collect(\n self, backend: Implementation | None, **kwargs: Any\n ) -> CompliantDataFrameAny: ...\n def collect_schema(self) -> Mapping[str, DType]: ...\n def drop(self, columns: Sequence[str], *, strict: bool) -> Self: ...\n def drop_nulls(self, subset: Sequence[str] | None) -> Self: ...\n def estimated_size(self, unit: SizeUnit) -> int | float: ...\n def explode(self, columns: Sequence[str]) -> Self: ...\n def filter(self, predicate: CompliantExprT_contra | Incomplete) -> Self: ...\n def gather_every(self, n: int, offset: int) -> Self: ...\n def get_column(self, name: str) -> CompliantSeriesT: ...\n def group_by(\n self,\n keys: Sequence[str] | Sequence[CompliantExprT_contra],\n *,\n drop_null_keys: bool,\n ) -> DataFrameGroupBy[Self, Any]: ...\n def head(self, n: int) -> Self: ...\n def item(self, row: int | None, column: int | str | None) -> Any: ...\n def iter_columns(self) -> Iterator[CompliantSeriesT]: ...\n def iter_rows(\n self, *, named: bool, buffer_size: int\n ) -> Iterator[tuple[Any, ...]] | Iterator[Mapping[str, Any]]: ...\n def is_unique(self) -> CompliantSeriesT: ...\n def join(\n self,\n other: Self,\n *,\n how: JoinStrategy,\n left_on: Sequence[str] | None,\n right_on: Sequence[str] | None,\n suffix: str,\n ) -> Self: ...\n def join_asof(\n self,\n other: Self,\n *,\n left_on: str,\n right_on: str,\n by_left: Sequence[str] | None,\n by_right: Sequence[str] | None,\n strategy: AsofJoinStrategy,\n suffix: str,\n ) -> Self: ...\n def lazy(self, *, backend: Implementation | None) -> CompliantLazyFrameAny: ...\n def pivot(\n self,\n on: Sequence[str],\n *,\n index: Sequence[str] | None,\n values: Sequence[str] | None,\n aggregate_function: PivotAgg | None,\n sort_columns: bool,\n separator: str,\n ) -> Self: ...\n def rename(self, mapping: Mapping[str, str]) -> Self: ...\n def row(self, index: int) -> tuple[Any, ...]: ...\n def rows(\n self, *, named: bool\n ) -> Sequence[tuple[Any, ...]] | Sequence[Mapping[str, Any]]: ...\n def sample(\n self,\n n: int | None,\n *,\n fraction: float | None,\n with_replacement: bool,\n seed: int | None,\n ) -> Self: ...\n def select(self, *exprs: CompliantExprT_contra) -> Self: ...\n def sort(\n self, *by: str, descending: bool | Sequence[bool], nulls_last: bool\n ) -> Self: ...\n def tail(self, n: int) -> Self: ...\n def to_arrow(self) -> pa.Table: ...\n def to_pandas(self) -> pd.DataFrame: ...\n def to_polars(self) -> pl.DataFrame: ...\n @overload\n def to_dict(self, *, as_series: Literal[True]) -> dict[str, CompliantSeriesT]: ...\n @overload\n def to_dict(self, *, as_series: Literal[False]) -> dict[str, list[Any]]: ...\n def to_dict(\n self, *, as_series: bool\n ) -> dict[str, CompliantSeriesT] | dict[str, list[Any]]: ...\n def unique(\n self,\n subset: Sequence[str] | None,\n *,\n keep: UniqueKeepStrategy,\n maintain_order: bool | None = None,\n ) -> Self: ...\n def unpivot(\n self,\n on: 
Sequence[str] | None,\n index: Sequence[str] | None,\n variable_name: str,\n value_name: str,\n ) -> Self: ...\n def with_columns(self, *exprs: CompliantExprT_contra) -> Self: ...\n def with_row_index(self, name: str, order_by: Sequence[str] | None) -> Self: ...\n @overload\n def write_csv(self, file: None) -> str: ...\n @overload\n def write_csv(self, file: str | Path | BytesIO) -> None: ...\n def write_csv(self, file: str | Path | BytesIO | None) -> str | None: ...\n def write_parquet(self, file: str | Path | BytesIO) -> None: ...\n\n def _evaluate_aliases(self, *exprs: CompliantExprT_contra) -> list[str]:\n it = (expr._evaluate_aliases(self) for expr in exprs)\n return list(chain.from_iterable(it))\n\n def _check_columns_exist(self, subset: Sequence[str]) -> ColumnNotFoundError | None:\n return check_columns_exist(subset, available=self.columns)\n\n\nclass CompliantLazyFrame(\n _StoresNative[NativeFrameT],\n FromNative[NativeFrameT],\n ToNarwhals[ToNarwhalsT_co],\n Protocol[CompliantExprT_contra, NativeFrameT, ToNarwhalsT_co],\n):\n _native_frame: NativeFrameT\n _implementation: Implementation\n _backend_version: tuple[int, ...]\n _version: Version\n\n def __narwhals_lazyframe__(self) -> Self: ...\n def __narwhals_namespace__(self) -> Any: ...\n\n @classmethod\n def from_native(cls, data: NativeFrameT, /, *, context: _FullContext) -> Self: ...\n\n def simple_select(self, *column_names: str) -> Self:\n """`select` where all args are column names."""\n ...\n\n def aggregate(self, *exprs: CompliantExprT_contra) -> Self:\n """`select` where all args are aggregations or literals.\n\n (so, no broadcasting is necessary).\n """\n ...\n\n def _with_version(self, version: Version) -> Self: ...\n\n @property\n def native(self) -> NativeFrameT:\n return self._native_frame\n\n @property\n def columns(self) -> Sequence[str]: ...\n @property\n def schema(self) -> Mapping[str, DType]: ...\n def _iter_columns(self) -> Iterator[Any]: ...\n def collect(\n self, backend: Implementation | None, **kwargs: Any\n ) -> CompliantDataFrameAny: ...\n def collect_schema(self) -> Mapping[str, DType]: ...\n def drop(self, columns: Sequence[str], *, strict: bool) -> Self: ...\n def drop_nulls(self, subset: Sequence[str] | None) -> Self: ...\n def explode(self, columns: Sequence[str]) -> Self: ...\n def filter(self, predicate: CompliantExprT_contra | Incomplete) -> Self: ...\n @deprecated(\n "`LazyFrame.gather_every` is deprecated and will be removed in a future version."\n )\n def gather_every(self, n: int, offset: int) -> Self: ...\n def group_by(\n self,\n keys: Sequence[str] | Sequence[CompliantExprT_contra],\n *,\n drop_null_keys: bool,\n ) -> CompliantGroupBy[Self, CompliantExprT_contra]: ...\n def head(self, n: int) -> Self: ...\n def join(\n self,\n other: Self,\n *,\n how: JoinStrategy,\n left_on: Sequence[str] | None,\n right_on: Sequence[str] | None,\n suffix: str,\n ) -> Self: ...\n def join_asof(\n self,\n other: Self,\n *,\n left_on: str,\n right_on: str,\n by_left: Sequence[str] | None,\n by_right: Sequence[str] | None,\n strategy: AsofJoinStrategy,\n suffix: str,\n ) -> Self: ...\n def rename(self, mapping: Mapping[str, str]) -> Self: ...\n def select(self, *exprs: CompliantExprT_contra) -> Self: ...\n def sort(\n self, *by: str, descending: bool | Sequence[bool], nulls_last: bool\n ) -> Self: ...\n @deprecated("`LazyFrame.tail` is deprecated and will be removed in a future version.")\n def tail(self, n: int) -> Self: ...\n def unique(\n self, subset: Sequence[str] | None, *, keep: 
LazyUniqueKeepStrategy\n ) -> Self: ...\n def unpivot(\n self,\n on: Sequence[str] | None,\n index: Sequence[str] | None,\n variable_name: str,\n value_name: str,\n ) -> Self: ...\n def with_columns(self, *exprs: CompliantExprT_contra) -> Self: ...\n def with_row_index(self, name: str, order_by: Sequence[str]) -> Self: ...\n def _evaluate_expr(self, expr: CompliantExprT_contra, /) -> Any:\n result = expr(self)\n assert len(result) == 1 # debug assertion # noqa: S101\n return result[0]\n\n def _evaluate_window_expr(\n self,\n expr: LazyExpr[Self, NativeExprT],\n /,\n window_inputs: WindowInputs[NativeExprT],\n ) -> NativeExprT:\n result = expr.window_function(self, window_inputs)\n assert len(result) == 1 # debug assertion # noqa: S101\n return result[0]\n\n def _evaluate_aliases(self, *exprs: CompliantExprT_contra) -> list[str]:\n it = (expr._evaluate_aliases(self) for expr in exprs)\n return list(chain.from_iterable(it))\n\n def _check_columns_exist(self, subset: Sequence[str]) -> ColumnNotFoundError | None:\n return check_columns_exist(subset, available=self.columns)\n\n\nclass EagerDataFrame(\n CompliantDataFrame[EagerSeriesT, EagerExprT, NativeFrameT, "DataFrame[NativeFrameT]"],\n CompliantLazyFrame[EagerExprT, NativeFrameT, "DataFrame[NativeFrameT]"],\n Protocol[EagerSeriesT, EagerExprT, NativeFrameT, NativeSeriesT],\n):\n def __narwhals_namespace__(\n self,\n ) -> EagerNamespace[Self, EagerSeriesT, EagerExprT, NativeFrameT, NativeSeriesT]: ...\n\n def to_narwhals(self) -> DataFrame[NativeFrameT]:\n return self._version.dataframe(self, level="full")\n\n def _with_native(\n self, df: NativeFrameT, *, validate_column_names: bool = True\n ) -> Self: ...\n\n def _evaluate_expr(self, expr: EagerExprT, /) -> EagerSeriesT:\n """Evaluate `expr` and ensure it has a **single** output."""\n result: Sequence[EagerSeriesT] = expr(self)\n assert len(result) == 1 # debug assertion # noqa: S101\n return result[0]\n\n def _evaluate_into_exprs(self, *exprs: EagerExprT) -> Sequence[EagerSeriesT]:\n # NOTE: Ignore is to avoid an intermittent false positive\n return list(chain.from_iterable(self._evaluate_into_expr(expr) for expr in exprs)) # pyright: ignore[reportArgumentType]\n\n def _evaluate_into_expr(self, expr: EagerExprT, /) -> Sequence[EagerSeriesT]:\n """Return list of raw columns.\n\n For eager backends we alias operations at each step.\n\n As a safety precaution, here we can check that the expected result names match those\n we were expecting from the various `evaluate_output_names` / `alias_output_names` calls.\n\n Note that for PySpark / DuckDB, we are less free to liberally set aliases whenever we want.\n """\n aliases = expr._evaluate_aliases(self)\n result = expr(self)\n if list(aliases) != (\n result_aliases := [s.name for s in result]\n ): # pragma: no cover\n msg = f"Safety assertion failed, expected {aliases}, got {result_aliases}"\n raise AssertionError(msg)\n return result\n\n def _extract_comparand(self, other: EagerSeriesT, /) -> Any:\n """Extract native Series, broadcasting to `len(self)` if necessary."""\n ...\n\n @staticmethod\n def _numpy_column_names(\n data: _2DArray, columns: Sequence[str] | None, /\n ) -> list[str]:\n return list(columns or (f"column_{x}" for x in range(data.shape[1])))\n\n def _gather(self, rows: SizedMultiIndexSelector[NativeSeriesT]) -> Self: ...\n def _gather_slice(self, rows: _SliceIndex | range) -> Self: ...\n def _select_multi_index(\n self, columns: SizedMultiIndexSelector[NativeSeriesT]\n ) -> Self: ...\n def _select_multi_name(\n self, columns: 
SizedMultiNameSelector[NativeSeriesT]\n ) -> Self: ...\n def _select_slice_index(self, columns: _SliceIndex | range) -> Self: ...\n def _select_slice_name(self, columns: _SliceName) -> Self: ...\n def __getitem__( # noqa: C901, PLR0912\n self,\n item: tuple[\n SingleIndexSelector | MultiIndexSelector[EagerSeriesT],\n MultiColSelector[EagerSeriesT],\n ],\n ) -> Self:\n rows, columns = item\n compliant = self\n if not is_slice_none(columns):\n if isinstance(columns, Sized) and len(columns) == 0:\n return compliant.select()\n if is_index_selector(columns):\n if is_slice_index(columns) or is_range(columns):\n compliant = compliant._select_slice_index(columns)\n elif is_compliant_series(columns):\n compliant = self._select_multi_index(columns.native)\n else:\n compliant = compliant._select_multi_index(columns)\n elif isinstance(columns, slice):\n compliant = compliant._select_slice_name(columns)\n elif is_compliant_series(columns):\n compliant = self._select_multi_name(columns.native)\n elif is_sequence_like(columns):\n compliant = self._select_multi_name(columns)\n else:\n assert_never(columns)\n\n if not is_slice_none(rows):\n if isinstance(rows, int):\n compliant = compliant._gather([rows])\n elif isinstance(rows, (slice, range)):\n compliant = compliant._gather_slice(rows)\n elif is_compliant_series(rows):\n compliant = compliant._gather(rows.native)\n elif is_sized_multi_index_selector(rows):\n compliant = compliant._gather(rows)\n else:\n assert_never(rows)\n\n return compliant\n
.venv\Lib\site-packages\narwhals\_compliant\dataframe.py
dataframe.py
Python
17,529
0.95
0.249497
0.028953
vue-tools
480
2024-05-07T17:30:51.469514
MIT
false
2e59039ae378fd6fe5026907a8c975c7
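One concrete piece of the frame protocol above is the `_numpy_column_names` default: when a 2-D numpy array arrives without a schema, columns are named positionally. A standalone sketch of the same logic:

import numpy as np

def numpy_column_names(data, columns=None):
    # identical logic to EagerDataFrame._numpy_column_names
    return list(columns or (f"column_{x}" for x in range(data.shape[1])))

data = np.zeros((3, 2))
print(numpy_column_names(data))              # ['column_0', 'column_1']
print(numpy_column_names(data, ["a", "b"]))  # ['a', 'b']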
from __future__ import annotations\n\nfrom collections.abc import Mapping\nfrom functools import partial\nfrom operator import methodcaller\nfrom typing import TYPE_CHECKING, Any, Callable, Generic, Literal, Protocol\n\nfrom narwhals._compliant.any_namespace import (\n CatNamespace,\n DateTimeNamespace,\n ListNamespace,\n NameNamespace,\n StringNamespace,\n StructNamespace,\n)\nfrom narwhals._compliant.namespace import CompliantNamespace\nfrom narwhals._compliant.typing import (\n AliasName,\n AliasNames,\n CompliantExprT_co,\n CompliantFrameT,\n CompliantLazyFrameT,\n CompliantSeriesOrNativeExprT_co,\n EagerDataFrameT,\n EagerExprT,\n EagerSeriesT,\n LazyExprT,\n NativeExprT,\n)\nfrom narwhals._typing_compat import Protocol38, deprecated\nfrom narwhals._utils import _StoresCompliant, not_implemented\nfrom narwhals.dependencies import get_numpy, is_numpy_array\n\nif TYPE_CHECKING:\n from collections.abc import Iterable, Mapping, Sequence\n\n from typing_extensions import Self, TypeIs\n\n from narwhals._compliant.namespace import CompliantNamespace, EagerNamespace\n from narwhals._compliant.series import CompliantSeries\n from narwhals._compliant.typing import (\n AliasNames,\n EvalNames,\n EvalSeries,\n ScalarKwargs,\n WindowFunction,\n )\n from narwhals._expression_parsing import ExprKind, ExprMetadata\n from narwhals._utils import Implementation, Version, _FullContext\n from narwhals.typing import (\n FillNullStrategy,\n IntoDType,\n NonNestedLiteral,\n NumericLiteral,\n RankMethod,\n RollingInterpolationMethod,\n TemporalLiteral,\n TimeUnit,\n )\n\n__all__ = ["CompliantExpr", "EagerExpr", "LazyExpr", "NativeExpr"]\n\n\nclass NativeExpr(Protocol):\n """An `Expr`-like object from a package with [Lazy-only support](https://narwhals-dev.github.io/narwhals/extending/#levels-of-support).\n\n Protocol members are chosen *purely* for matching statically - as they\n are common to all currently supported packages.\n """\n\n def between(self, *args: Any, **kwds: Any) -> Any: ...\n def isin(self, *args: Any, **kwds: Any) -> Any: ...\n\n\nclass CompliantExpr(Protocol38[CompliantFrameT, CompliantSeriesOrNativeExprT_co]):\n _implementation: Implementation\n _backend_version: tuple[int, ...]\n _version: Version\n _evaluate_output_names: EvalNames[CompliantFrameT]\n _alias_output_names: AliasNames | None\n _metadata: ExprMetadata | None\n\n def __call__(\n self, df: CompliantFrameT\n ) -> Sequence[CompliantSeriesOrNativeExprT_co]: ...\n def __narwhals_expr__(self) -> None: ...\n def __narwhals_namespace__(self) -> CompliantNamespace[CompliantFrameT, Self]: ...\n @classmethod\n def from_column_names(\n cls,\n evaluate_column_names: EvalNames[CompliantFrameT],\n /,\n *,\n context: _FullContext,\n ) -> Self: ...\n @classmethod\n def from_column_indices(cls, *column_indices: int, context: _FullContext) -> Self: ...\n @staticmethod\n def _eval_names_indices(indices: Sequence[int], /) -> EvalNames[CompliantFrameT]:\n def fn(df: CompliantFrameT) -> Sequence[str]:\n column_names = df.columns\n return [column_names[i] for i in indices]\n\n return fn\n\n def is_null(self) -> Self: ...\n def abs(self) -> Self: ...\n def all(self) -> Self: ...\n def any(self) -> Self: ...\n def alias(self, name: str) -> Self: ...\n def cast(self, dtype: IntoDType) -> Self: ...\n def count(self) -> Self: ...\n def min(self) -> Self: ...\n def max(self) -> Self: ...\n def arg_min(self) -> Self: ...\n def arg_max(self) -> Self: ...\n def arg_true(self) -> Self: ...\n def mean(self) -> Self: ...\n def sum(self) -> Self: ...\n def 
median(self) -> Self: ...\n def skew(self) -> Self: ...\n def kurtosis(self) -> Self: ...\n def std(self, *, ddof: int) -> Self: ...\n def var(self, *, ddof: int) -> Self: ...\n def n_unique(self) -> Self: ...\n def null_count(self) -> Self: ...\n def drop_nulls(self) -> Self: ...\n def fill_null(\n self,\n value: Self | NonNestedLiteral,\n strategy: FillNullStrategy | None,\n limit: int | None,\n ) -> Self: ...\n def diff(self) -> Self: ...\n def exp(self) -> Self: ...\n def sqrt(self) -> Self: ...\n def unique(self) -> Self: ...\n def len(self) -> Self: ...\n def log(self, base: float) -> Self: ...\n def round(self, decimals: int) -> Self: ...\n def mode(self) -> Self: ...\n def head(self, n: int) -> Self: ...\n def tail(self, n: int) -> Self: ...\n def shift(self, n: int) -> Self: ...\n def is_finite(self) -> Self: ...\n def is_nan(self) -> Self: ...\n def is_unique(self) -> Self: ...\n def is_first_distinct(self) -> Self: ...\n def is_last_distinct(self) -> Self: ...\n def cum_sum(self, *, reverse: bool) -> Self: ...\n def cum_count(self, *, reverse: bool) -> Self: ...\n def cum_min(self, *, reverse: bool) -> Self: ...\n def cum_max(self, *, reverse: bool) -> Self: ...\n def cum_prod(self, *, reverse: bool) -> Self: ...\n def is_in(self, other: Any) -> Self: ...\n def sort(self, *, descending: bool, nulls_last: bool) -> Self: ...\n def rank(self, method: RankMethod, *, descending: bool) -> Self: ...\n def replace_strict(\n self,\n old: Sequence[Any] | Mapping[Any, Any],\n new: Sequence[Any],\n *,\n return_dtype: IntoDType | None,\n ) -> Self: ...\n def over(self, partition_by: Sequence[str], order_by: Sequence[str]) -> Self: ...\n def sample(\n self,\n n: int | None,\n *,\n fraction: float | None,\n with_replacement: bool,\n seed: int | None,\n ) -> Self: ...\n def quantile(\n self, quantile: float, interpolation: RollingInterpolationMethod\n ) -> Self: ...\n def map_batches(\n self,\n function: Callable[[CompliantSeries[Any]], CompliantExpr[Any, Any]],\n return_dtype: IntoDType | None,\n ) -> Self: ...\n\n def clip(\n self,\n lower_bound: Self | NumericLiteral | TemporalLiteral | None,\n upper_bound: Self | NumericLiteral | TemporalLiteral | None,\n ) -> Self: ...\n\n def ewm_mean(\n self,\n *,\n com: float | None,\n span: float | None,\n half_life: float | None,\n alpha: float | None,\n adjust: bool,\n min_samples: int,\n ignore_nulls: bool,\n ) -> Self: ...\n\n def rolling_sum(\n self, window_size: int, *, min_samples: int, center: bool\n ) -> Self: ...\n\n def rolling_mean(\n self, window_size: int, *, min_samples: int, center: bool\n ) -> Self: ...\n\n def rolling_var(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self: ...\n\n def rolling_std(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self: ...\n\n @deprecated("Since `1.22.0`")\n def gather_every(self, n: int, offset: int) -> Self: ...\n def __and__(self, other: Any) -> Self: ...\n def __or__(self, other: Any) -> Self: ...\n def __add__(self, other: Any) -> Self: ...\n def __sub__(self, other: Any) -> Self: ...\n def __mul__(self, other: Any) -> Self: ...\n def __floordiv__(self, other: Any) -> Self: ...\n def __truediv__(self, other: Any) -> Self: ...\n def __mod__(self, other: Any) -> Self: ...\n def __pow__(self, other: Any) -> Self: ...\n def __gt__(self, other: Any) -> Self: ...\n def __ge__(self, other: Any) -> Self: ...\n def __lt__(self, other: Any) -> Self: ...\n def __le__(self, other: Any) -> Self: ...\n def __invert__(self) -> Self: ...\n def 
broadcast(\n self, kind: Literal[ExprKind.AGGREGATION, ExprKind.LITERAL]\n ) -> Self: ...\n def _is_multi_output_unnamed(self) -> bool:\n """Return `True` for multi-output aggregations without names.\n\n For example, column `'a'` only appears in the output as a grouping key:\n\n df.group_by('a').agg(nw.all().sum())\n\n It does not get included in:\n\n nw.all().sum().\n """\n assert self._metadata is not None # noqa: S101\n return self._metadata.expansion_kind.is_multi_unnamed()\n\n def _evaluate_aliases(\n self: CompliantExpr[CompliantFrameT, Any], frame: CompliantFrameT, /\n ) -> Sequence[str]:\n names = self._evaluate_output_names(frame)\n return alias(names) if (alias := self._alias_output_names) else names\n\n @property\n def str(self) -> StringNamespace[Self]: ...\n @property\n def name(self) -> NameNamespace[Self]: ...\n @property\n def dt(self) -> DateTimeNamespace[Self]: ...\n @property\n def cat(self) -> CatNamespace[Self]: ...\n @property\n def list(self) -> ListNamespace[Self]: ...\n @property\n def struct(self) -> StructNamespace[Self]: ...\n\n\nclass DepthTrackingExpr(\n CompliantExpr[CompliantFrameT, CompliantSeriesOrNativeExprT_co],\n Protocol38[CompliantFrameT, CompliantSeriesOrNativeExprT_co],\n):\n _depth: int\n _function_name: str\n\n @classmethod\n def from_column_names(\n cls: type[Self],\n evaluate_column_names: EvalNames[CompliantFrameT],\n /,\n *,\n context: _FullContext,\n function_name: str = "",\n ) -> Self: ...\n\n def _is_elementary(self) -> bool:\n """Check if expr is elementary.\n\n Examples:\n - nw.col('a').mean() # depth 1\n - nw.mean('a') # depth 1\n - nw.len() # depth 0\n\n as opposed to, say\n\n - nw.col('a').filter(nw.col('b')>nw.col('c')).max()\n\n Elementary expressions are the only ones supported properly in\n pandas, PyArrow, and Dask.\n """\n return self._depth < 2\n\n def __repr__(self) -> str: # pragma: no cover\n return f"{type(self).__name__}(depth={self._depth}, function_name={self._function_name})"\n\n\nclass EagerExpr(\n DepthTrackingExpr[EagerDataFrameT, EagerSeriesT],\n Protocol38[EagerDataFrameT, EagerSeriesT],\n):\n _call: EvalSeries[EagerDataFrameT, EagerSeriesT]\n _scalar_kwargs: ScalarKwargs\n\n def __init__(\n self,\n call: EvalSeries[EagerDataFrameT, EagerSeriesT],\n *,\n depth: int,\n function_name: str,\n evaluate_output_names: EvalNames[EagerDataFrameT],\n alias_output_names: AliasNames | None,\n implementation: Implementation,\n backend_version: tuple[int, ...],\n version: Version,\n scalar_kwargs: ScalarKwargs | None = None,\n ) -> None: ...\n\n def __call__(self, df: EagerDataFrameT) -> Sequence[EagerSeriesT]:\n return self._call(df)\n\n def __narwhals_namespace__(\n self,\n ) -> EagerNamespace[EagerDataFrameT, EagerSeriesT, Self, Any, Any]: ...\n def __narwhals_expr__(self) -> None: ...\n\n @classmethod\n def _from_callable(\n cls,\n func: EvalSeries[EagerDataFrameT, EagerSeriesT],\n *,\n depth: int,\n function_name: str,\n evaluate_output_names: EvalNames[EagerDataFrameT],\n alias_output_names: AliasNames | None,\n context: _FullContext,\n scalar_kwargs: ScalarKwargs | None = None,\n ) -> Self:\n return cls(\n func,\n depth=depth,\n function_name=function_name,\n evaluate_output_names=evaluate_output_names,\n alias_output_names=alias_output_names,\n implementation=context._implementation,\n backend_version=context._backend_version,\n version=context._version,\n scalar_kwargs=scalar_kwargs,\n )\n\n @classmethod\n def _from_series(cls, series: EagerSeriesT) -> Self:\n return cls(\n lambda _df: [series],\n depth=0,\n 
function_name="series",\n evaluate_output_names=lambda _df: [series.name],\n alias_output_names=None,\n implementation=series._implementation,\n backend_version=series._backend_version,\n version=series._version,\n )\n\n def _reuse_series(\n self,\n method_name: str,\n *,\n returns_scalar: bool = False,\n scalar_kwargs: ScalarKwargs | None = None,\n **expressifiable_args: Any,\n ) -> Self:\n """Reuse Series implementation for expression.\n\n If Series.foo is already defined, and we'd like Expr.foo to be the same, we can\n leverage this method to do that for us.\n\n Arguments:\n method_name: name of method.\n returns_scalar: whether the Series version returns a scalar. In this case,\n the expression version should return a 1-row Series.\n scalar_kwargs: non-expressifiable args which we may need to reuse in `agg` or `over`,\n such as `ddof` for `std` and `var`.\n expressifiable_args: keyword arguments to pass to function, which may\n be expressifiable (e.g. `nw.col('a').is_between(3, nw.col('b')))`).\n """\n func = partial(\n self._reuse_series_inner,\n method_name=method_name,\n returns_scalar=returns_scalar,\n scalar_kwargs=scalar_kwargs or {},\n expressifiable_args=expressifiable_args,\n )\n return self._from_callable(\n func,\n depth=self._depth + 1,\n function_name=f"{self._function_name}->{method_name}",\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n scalar_kwargs=scalar_kwargs,\n context=self,\n )\n\n # For PyArrow.Series, we return Python Scalars (like Polars does) instead of PyArrow Scalars.\n # However, when working with expressions, we keep everything PyArrow-native.\n def _reuse_series_extra_kwargs(\n self, *, returns_scalar: bool = False\n ) -> dict[str, Any]:\n return {}\n\n @classmethod\n def _is_expr(cls, obj: Self | Any) -> TypeIs[Self]:\n return hasattr(obj, "__narwhals_expr__")\n\n def _reuse_series_inner(\n self,\n df: EagerDataFrameT,\n *,\n method_name: str,\n returns_scalar: bool,\n scalar_kwargs: ScalarKwargs,\n expressifiable_args: dict[str, Any],\n ) -> Sequence[EagerSeriesT]:\n kwargs = {\n **scalar_kwargs,\n **{\n name: df._evaluate_expr(value) if self._is_expr(value) else value\n for name, value in expressifiable_args.items()\n },\n }\n method = methodcaller(\n method_name,\n **self._reuse_series_extra_kwargs(returns_scalar=returns_scalar),\n **kwargs,\n )\n out: Sequence[EagerSeriesT] = [\n series._from_scalar(method(series)) if returns_scalar else method(series)\n for series in self(df)\n ]\n aliases = self._evaluate_aliases(df)\n if [s.name for s in out] != list(aliases): # pragma: no cover\n msg = (\n f"Safety assertion failed, please report a bug to https://github.com/narwhals-dev/narwhals/issues\n"\n f"Expression aliases: {aliases}\n"\n f"Series names: {[s.name for s in out]}"\n )\n raise AssertionError(msg)\n return out\n\n def _reuse_series_namespace(\n self,\n series_namespace: Literal["cat", "dt", "list", "name", "str", "struct"],\n method_name: str,\n **kwargs: Any,\n ) -> Self:\n """Reuse Series implementation for expression.\n\n Just like `_reuse_series`, but for e.g. 
`Expr.dt.foo` instead\n of `Expr.foo`.\n\n Arguments:\n series_namespace: The Series namespace.\n method_name: name of method, within `series_namespace`.\n kwargs: keyword arguments to pass to function.\n """\n return self._from_callable(\n lambda df: [\n getattr(getattr(series, series_namespace), method_name)(**kwargs)\n for series in self(df)\n ],\n depth=self._depth + 1,\n function_name=f"{self._function_name}->{series_namespace}.{method_name}",\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n scalar_kwargs=self._scalar_kwargs,\n context=self,\n )\n\n def broadcast(self, kind: Literal[ExprKind.AGGREGATION, ExprKind.LITERAL]) -> Self:\n # Mark the resulting Series with `_broadcast = True`.\n # Then, when extracting native objects, `extract_native` will\n # know what to do.\n def func(df: EagerDataFrameT) -> list[EagerSeriesT]:\n results = []\n for result in self(df):\n result._broadcast = True\n results.append(result)\n return results\n\n return type(self)(\n func,\n depth=self._depth,\n function_name=self._function_name,\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n backend_version=self._backend_version,\n implementation=self._implementation,\n version=self._version,\n scalar_kwargs=self._scalar_kwargs,\n )\n\n def cast(self, dtype: IntoDType) -> Self:\n return self._reuse_series("cast", dtype=dtype)\n\n def __eq__(self, other: Self | Any) -> Self: # type: ignore[override]\n return self._reuse_series("__eq__", other=other)\n\n def __ne__(self, other: Self | Any) -> Self: # type: ignore[override]\n return self._reuse_series("__ne__", other=other)\n\n def __ge__(self, other: Self | Any) -> Self:\n return self._reuse_series("__ge__", other=other)\n\n def __gt__(self, other: Self | Any) -> Self:\n return self._reuse_series("__gt__", other=other)\n\n def __le__(self, other: Self | Any) -> Self:\n return self._reuse_series("__le__", other=other)\n\n def __lt__(self, other: Self | Any) -> Self:\n return self._reuse_series("__lt__", other=other)\n\n def __and__(self, other: Self | bool | Any) -> Self:\n return self._reuse_series("__and__", other=other)\n\n def __or__(self, other: Self | bool | Any) -> Self:\n return self._reuse_series("__or__", other=other)\n\n def __add__(self, other: Self | Any) -> Self:\n return self._reuse_series("__add__", other=other)\n\n def __sub__(self, other: Self | Any) -> Self:\n return self._reuse_series("__sub__", other=other)\n\n def __rsub__(self, other: Self | Any) -> Self:\n return self.alias("literal")._reuse_series("__rsub__", other=other)\n\n def __mul__(self, other: Self | Any) -> Self:\n return self._reuse_series("__mul__", other=other)\n\n def __truediv__(self, other: Self | Any) -> Self:\n return self._reuse_series("__truediv__", other=other)\n\n def __rtruediv__(self, other: Self | Any) -> Self:\n return self.alias("literal")._reuse_series("__rtruediv__", other=other)\n\n def __floordiv__(self, other: Self | Any) -> Self:\n return self._reuse_series("__floordiv__", other=other)\n\n def __rfloordiv__(self, other: Self | Any) -> Self:\n return self.alias("literal")._reuse_series("__rfloordiv__", other=other)\n\n def __pow__(self, other: Self | Any) -> Self:\n return self._reuse_series("__pow__", other=other)\n\n def __rpow__(self, other: Self | Any) -> Self:\n return self.alias("literal")._reuse_series("__rpow__", other=other)\n\n def __mod__(self, other: Self | Any) -> Self:\n return self._reuse_series("__mod__", other=other)\n\n def 
__rmod__(self, other: Self | Any) -> Self:\n return self.alias("literal")._reuse_series("__rmod__", other=other)\n\n # Unary\n def __invert__(self) -> Self:\n return self._reuse_series("__invert__")\n\n # Reductions\n def null_count(self) -> Self:\n return self._reuse_series("null_count", returns_scalar=True)\n\n def n_unique(self) -> Self:\n return self._reuse_series("n_unique", returns_scalar=True)\n\n def sum(self) -> Self:\n return self._reuse_series("sum", returns_scalar=True)\n\n def count(self) -> Self:\n return self._reuse_series("count", returns_scalar=True)\n\n def mean(self) -> Self:\n return self._reuse_series("mean", returns_scalar=True)\n\n def median(self) -> Self:\n return self._reuse_series("median", returns_scalar=True)\n\n def std(self, *, ddof: int) -> Self:\n return self._reuse_series(\n "std", returns_scalar=True, scalar_kwargs={"ddof": ddof}\n )\n\n def var(self, *, ddof: int) -> Self:\n return self._reuse_series(\n "var", returns_scalar=True, scalar_kwargs={"ddof": ddof}\n )\n\n def skew(self) -> Self:\n return self._reuse_series("skew", returns_scalar=True)\n\n def kurtosis(self) -> Self:\n return self._reuse_series("kurtosis", returns_scalar=True)\n\n def any(self) -> Self:\n return self._reuse_series("any", returns_scalar=True)\n\n def all(self) -> Self:\n return self._reuse_series("all", returns_scalar=True)\n\n def max(self) -> Self:\n return self._reuse_series("max", returns_scalar=True)\n\n def min(self) -> Self:\n return self._reuse_series("min", returns_scalar=True)\n\n def arg_min(self) -> Self:\n return self._reuse_series("arg_min", returns_scalar=True)\n\n def arg_max(self) -> Self:\n return self._reuse_series("arg_max", returns_scalar=True)\n\n # Other\n\n def clip(\n self,\n lower_bound: Self | NumericLiteral | TemporalLiteral | None,\n upper_bound: Self | NumericLiteral | TemporalLiteral | None,\n ) -> Self:\n return self._reuse_series(\n "clip", lower_bound=lower_bound, upper_bound=upper_bound\n )\n\n def is_null(self) -> Self:\n return self._reuse_series("is_null")\n\n def is_nan(self) -> Self:\n return self._reuse_series("is_nan")\n\n def fill_null(\n self,\n value: Self | NonNestedLiteral,\n strategy: FillNullStrategy | None,\n limit: int | None,\n ) -> Self:\n return self._reuse_series(\n "fill_null", value=value, strategy=strategy, limit=limit\n )\n\n def is_in(self, other: Any) -> Self:\n return self._reuse_series("is_in", other=other)\n\n def arg_true(self) -> Self:\n return self._reuse_series("arg_true")\n\n def filter(self, *predicates: Self) -> Self:\n plx = self.__narwhals_namespace__()\n predicate = plx.all_horizontal(*predicates, ignore_nulls=False)\n return self._reuse_series("filter", predicate=predicate)\n\n def drop_nulls(self) -> Self:\n return self._reuse_series("drop_nulls")\n\n def replace_strict(\n self,\n old: Sequence[Any] | Mapping[Any, Any],\n new: Sequence[Any],\n *,\n return_dtype: IntoDType | None,\n ) -> Self:\n return self._reuse_series(\n "replace_strict", old=old, new=new, return_dtype=return_dtype\n )\n\n def sort(self, *, descending: bool, nulls_last: bool) -> Self:\n return self._reuse_series("sort", descending=descending, nulls_last=nulls_last)\n\n def abs(self) -> Self:\n return self._reuse_series("abs")\n\n def unique(self) -> Self:\n return self._reuse_series("unique", maintain_order=False)\n\n def diff(self) -> Self:\n return self._reuse_series("diff")\n\n def sample(\n self,\n n: int | None,\n *,\n fraction: float | None,\n with_replacement: bool,\n seed: int | None,\n ) -> Self:\n return 
self._reuse_series(\n "sample", n=n, fraction=fraction, with_replacement=with_replacement, seed=seed\n )\n\n def alias(self, name: str) -> Self:\n def alias_output_names(names: Sequence[str]) -> Sequence[str]:\n if len(names) != 1:\n msg = f"Expected function with single output, found output names: {names}"\n raise ValueError(msg)\n return [name]\n\n # Define this one manually, so that we can\n # override `output_names` and not increase depth\n return type(self)(\n lambda df: [series.alias(name) for series in self(df)],\n depth=self._depth,\n function_name=self._function_name,\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=alias_output_names,\n backend_version=self._backend_version,\n implementation=self._implementation,\n version=self._version,\n scalar_kwargs=self._scalar_kwargs,\n )\n\n def is_unique(self) -> Self:\n return self._reuse_series("is_unique")\n\n def is_first_distinct(self) -> Self:\n return self._reuse_series("is_first_distinct")\n\n def is_last_distinct(self) -> Self:\n return self._reuse_series("is_last_distinct")\n\n def quantile(\n self, quantile: float, interpolation: RollingInterpolationMethod\n ) -> Self:\n return self._reuse_series(\n "quantile",\n quantile=quantile,\n interpolation=interpolation,\n returns_scalar=True,\n )\n\n def head(self, n: int) -> Self:\n return self._reuse_series("head", n=n)\n\n def tail(self, n: int) -> Self:\n return self._reuse_series("tail", n=n)\n\n def round(self, decimals: int) -> Self:\n return self._reuse_series("round", decimals=decimals)\n\n def len(self) -> Self:\n return self._reuse_series("len", returns_scalar=True)\n\n def gather_every(self, n: int, offset: int) -> Self:\n return self._reuse_series("gather_every", n=n, offset=offset)\n\n def mode(self) -> Self:\n return self._reuse_series("mode")\n\n def is_finite(self) -> Self:\n return self._reuse_series("is_finite")\n\n def rolling_mean(self, window_size: int, *, min_samples: int, center: bool) -> Self:\n return self._reuse_series(\n "rolling_mean",\n window_size=window_size,\n min_samples=min_samples,\n center=center,\n )\n\n def rolling_std(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self:\n return self._reuse_series(\n "rolling_std",\n window_size=window_size,\n min_samples=min_samples,\n center=center,\n ddof=ddof,\n )\n\n def rolling_sum(self, window_size: int, *, min_samples: int, center: bool) -> Self:\n return self._reuse_series(\n "rolling_sum", window_size=window_size, min_samples=min_samples, center=center\n )\n\n def rolling_var(\n self, window_size: int, *, min_samples: int, center: bool, ddof: int\n ) -> Self:\n return self._reuse_series(\n "rolling_var",\n window_size=window_size,\n min_samples=min_samples,\n center=center,\n ddof=ddof,\n )\n\n def map_batches(\n self, function: Callable[[Any], Any], return_dtype: IntoDType | None\n ) -> Self:\n def func(df: EagerDataFrameT) -> Sequence[EagerSeriesT]:\n input_series_list = self(df)\n output_names = [input_series.name for input_series in input_series_list]\n result = [function(series) for series in input_series_list]\n if is_numpy_array(result[0]) or (\n (np := get_numpy()) is not None and np.isscalar(result[0])\n ):\n from_numpy = partial(\n self.__narwhals_namespace__()._series.from_numpy, context=self\n )\n result = [\n from_numpy(array).alias(output_name)\n for array, output_name in zip(result, output_names)\n ]\n if return_dtype is not None:\n result = [series.cast(return_dtype) for series in result]\n return result\n\n return 
self._from_callable(\n func,\n depth=self._depth + 1,\n function_name=self._function_name + "->map_batches",\n evaluate_output_names=self._evaluate_output_names,\n alias_output_names=self._alias_output_names,\n context=self,\n )\n\n @property\n def cat(self) -> EagerExprCatNamespace[Self]:\n return EagerExprCatNamespace(self)\n\n @property\n def dt(self) -> EagerExprDateTimeNamespace[Self]:\n return EagerExprDateTimeNamespace(self)\n\n @property\n def list(self) -> EagerExprListNamespace[Self]:\n return EagerExprListNamespace(self)\n\n @property\n def name(self) -> EagerExprNameNamespace[Self]:\n return EagerExprNameNamespace(self)\n\n @property\n def str(self) -> EagerExprStringNamespace[Self]:\n return EagerExprStringNamespace(self)\n\n @property\n def struct(self) -> EagerExprStructNamespace[Self]:\n return EagerExprStructNamespace(self)\n\n\nclass LazyExpr(\n CompliantExpr[CompliantLazyFrameT, NativeExprT],\n Protocol38[CompliantLazyFrameT, NativeExprT],\n):\n arg_min: not_implemented = not_implemented()\n arg_max: not_implemented = not_implemented()\n arg_true: not_implemented = not_implemented()\n head: not_implemented = not_implemented()\n tail: not_implemented = not_implemented()\n mode: not_implemented = not_implemented()\n sort: not_implemented = not_implemented()\n sample: not_implemented = not_implemented()\n map_batches: not_implemented = not_implemented()\n ewm_mean: not_implemented = not_implemented()\n gather_every: not_implemented = not_implemented()\n replace_strict: not_implemented = not_implemented()\n cat: not_implemented = not_implemented() # type: ignore[assignment]\n\n @property\n def window_function(self) -> WindowFunction[CompliantLazyFrameT, NativeExprT]: ...\n\n @classmethod\n def _is_expr(cls, obj: Self | Any) -> TypeIs[Self]:\n return hasattr(obj, "__narwhals_expr__")\n\n def _with_callable(self, call: Callable[..., Any], /) -> Self: ...\n def _with_alias_output_names(self, func: AliasNames | None, /) -> Self: ...\n def alias(self, name: str) -> Self:\n def fn(names: Sequence[str]) -> Sequence[str]:\n if len(names) != 1:\n msg = f"Expected function with single output, found output names: {names}"\n raise ValueError(msg)\n return [name]\n\n return self._with_alias_output_names(fn)\n\n @classmethod\n def _alias_native(cls, expr: NativeExprT, name: str, /) -> NativeExprT: ...\n\n @classmethod\n def _from_elementwise_horizontal_op(\n cls, func: Callable[[Iterable[NativeExprT]], NativeExprT], *exprs: Self\n ) -> Self: ...\n\n @property\n def name(self) -> LazyExprNameNamespace[Self]:\n return LazyExprNameNamespace(self)\n\n def _with_binary(self, op: Callable[..., NativeExprT], other: Self | Any) -> Self: ...\n\n def __eq__(self, other: Self) -> Self: # type: ignore[override]\n return self._with_binary(lambda expr, other: expr.__eq__(other), other)\n\n def __ne__(self, other: Self) -> Self: # type: ignore[override]\n return self._with_binary(lambda expr, other: expr.__ne__(other), other)\n\n def __add__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: expr.__add__(other), other)\n\n def __sub__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: expr.__sub__(other), other)\n\n def __rsub__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: other - expr, other).alias("literal")\n\n def __mul__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: expr.__mul__(other), other)\n\n def __truediv__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: 
expr.__truediv__(other), other)\n\n def __rtruediv__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: other / expr, other).alias("literal")\n\n def __floordiv__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: expr.__floordiv__(other), other)\n\n def __rfloordiv__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: other // expr, other).alias(\n "literal"\n )\n\n def __pow__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: expr.__pow__(other), other)\n\n def __rpow__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: other**expr, other).alias("literal")\n\n def __mod__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: expr.__mod__(other), other)\n\n def __rmod__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: other % expr, other).alias("literal")\n\n def __ge__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: expr.__ge__(other), other)\n\n def __gt__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: expr.__gt__(other), other)\n\n def __le__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: expr.__le__(other), other)\n\n def __lt__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: expr.__lt__(other), other)\n\n def __and__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: expr.__and__(other), other)\n\n def __or__(self, other: Self) -> Self:\n return self._with_binary(lambda expr, other: expr.__or__(other), other)\n\n\nclass _ExprNamespace( # type: ignore[misc]\n _StoresCompliant[CompliantExprT_co], Protocol[CompliantExprT_co]\n):\n _compliant_expr: CompliantExprT_co\n\n @property\n def compliant(self) -> CompliantExprT_co:\n return self._compliant_expr\n\n\nclass EagerExprNamespace(_ExprNamespace[EagerExprT], Generic[EagerExprT]):\n def __init__(self, expr: EagerExprT, /) -> None:\n self._compliant_expr = expr\n\n\nclass LazyExprNamespace(_ExprNamespace[LazyExprT], Generic[LazyExprT]):\n def __init__(self, expr: LazyExprT, /) -> None:\n self._compliant_expr = expr\n\n\nclass EagerExprCatNamespace(\n EagerExprNamespace[EagerExprT], CatNamespace[EagerExprT], Generic[EagerExprT]\n):\n def get_categories(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("cat", "get_categories")\n\n\nclass EagerExprDateTimeNamespace(\n EagerExprNamespace[EagerExprT], DateTimeNamespace[EagerExprT], Generic[EagerExprT]\n):\n def to_string(self, format: str) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "to_string", format=format)\n\n def replace_time_zone(self, time_zone: str | None) -> EagerExprT:\n return self.compliant._reuse_series_namespace(\n "dt", "replace_time_zone", time_zone=time_zone\n )\n\n def convert_time_zone(self, time_zone: str) -> EagerExprT:\n return self.compliant._reuse_series_namespace(\n "dt", "convert_time_zone", time_zone=time_zone\n )\n\n def timestamp(self, time_unit: TimeUnit) -> EagerExprT:\n return self.compliant._reuse_series_namespace(\n "dt", "timestamp", time_unit=time_unit\n )\n\n def date(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "date")\n\n def year(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "year")\n\n def month(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "month")\n\n def day(self) -> EagerExprT:\n return 
self.compliant._reuse_series_namespace("dt", "day")\n\n def hour(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "hour")\n\n def minute(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "minute")\n\n def second(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "second")\n\n def millisecond(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "millisecond")\n\n def microsecond(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "microsecond")\n\n def nanosecond(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "nanosecond")\n\n def ordinal_day(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "ordinal_day")\n\n def weekday(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "weekday")\n\n def total_minutes(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "total_minutes")\n\n def total_seconds(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "total_seconds")\n\n def total_milliseconds(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "total_milliseconds")\n\n def total_microseconds(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "total_microseconds")\n\n def total_nanoseconds(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "total_nanoseconds")\n\n def truncate(self, every: str) -> EagerExprT:\n return self.compliant._reuse_series_namespace("dt", "truncate", every=every)\n\n\nclass EagerExprListNamespace(\n EagerExprNamespace[EagerExprT], ListNamespace[EagerExprT], Generic[EagerExprT]\n):\n def len(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("list", "len")\n\n\nclass CompliantExprNameNamespace( # type: ignore[misc]\n _ExprNamespace[CompliantExprT_co],\n NameNamespace[CompliantExprT_co],\n Protocol[CompliantExprT_co],\n):\n def keep(self) -> CompliantExprT_co:\n return self._from_callable(lambda name: name, alias=False)\n\n def map(self, function: AliasName) -> CompliantExprT_co:\n return self._from_callable(function)\n\n def prefix(self, prefix: str) -> CompliantExprT_co:\n return self._from_callable(lambda name: f"{prefix}{name}")\n\n def suffix(self, suffix: str) -> CompliantExprT_co:\n return self._from_callable(lambda name: f"{name}{suffix}")\n\n def to_lowercase(self) -> CompliantExprT_co:\n return self._from_callable(str.lower)\n\n def to_uppercase(self) -> CompliantExprT_co:\n return self._from_callable(str.upper)\n\n @staticmethod\n def _alias_output_names(func: AliasName, /) -> AliasNames:\n def fn(output_names: Sequence[str], /) -> Sequence[str]:\n return [func(name) for name in output_names]\n\n return fn\n\n def _from_callable(\n self, func: AliasName, /, *, alias: bool = True\n ) -> CompliantExprT_co: ...\n\n\nclass EagerExprNameNamespace(\n EagerExprNamespace[EagerExprT],\n CompliantExprNameNamespace[EagerExprT],\n Generic[EagerExprT],\n):\n def _from_callable(self, func: AliasName, /, *, alias: bool = True) -> EagerExprT:\n expr = self.compliant\n return type(expr)(\n lambda df: [\n series.alias(func(name))\n for series, name in zip(expr(df), expr._evaluate_output_names(df))\n ],\n depth=expr._depth,\n function_name=expr._function_name,\n evaluate_output_names=expr._evaluate_output_names,\n alias_output_names=self._alias_output_names(func) if alias else None,\n backend_version=expr._backend_version,\n 
implementation=expr._implementation,\n version=expr._version,\n scalar_kwargs=expr._scalar_kwargs,\n )\n\n\nclass LazyExprNameNamespace(\n LazyExprNamespace[LazyExprT],\n CompliantExprNameNamespace[LazyExprT],\n Generic[LazyExprT],\n):\n def _from_callable(self, func: AliasName, /, *, alias: bool = True) -> LazyExprT:\n expr = self.compliant\n output_names = self._alias_output_names(func) if alias else None\n return expr._with_alias_output_names(output_names)\n\n\nclass EagerExprStringNamespace(\n EagerExprNamespace[EagerExprT], StringNamespace[EagerExprT], Generic[EagerExprT]\n):\n def len_chars(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("str", "len_chars")\n\n def replace(self, pattern: str, value: str, *, literal: bool, n: int) -> EagerExprT:\n return self.compliant._reuse_series_namespace(\n "str", "replace", pattern=pattern, value=value, literal=literal, n=n\n )\n\n def replace_all(self, pattern: str, value: str, *, literal: bool) -> EagerExprT:\n return self.compliant._reuse_series_namespace(\n "str", "replace_all", pattern=pattern, value=value, literal=literal\n )\n\n def strip_chars(self, characters: str | None) -> EagerExprT:\n return self.compliant._reuse_series_namespace(\n "str", "strip_chars", characters=characters\n )\n\n def starts_with(self, prefix: str) -> EagerExprT:\n return self.compliant._reuse_series_namespace("str", "starts_with", prefix=prefix)\n\n def ends_with(self, suffix: str) -> EagerExprT:\n return self.compliant._reuse_series_namespace("str", "ends_with", suffix=suffix)\n\n def contains(self, pattern: str, *, literal: bool) -> EagerExprT:\n return self.compliant._reuse_series_namespace(\n "str", "contains", pattern=pattern, literal=literal\n )\n\n def slice(self, offset: int, length: int | None) -> EagerExprT:\n return self.compliant._reuse_series_namespace(\n "str", "slice", offset=offset, length=length\n )\n\n def split(self, by: str) -> EagerExprT:\n return self.compliant._reuse_series_namespace("str", "split", by=by)\n\n def to_datetime(self, format: str | None) -> EagerExprT:\n return self.compliant._reuse_series_namespace("str", "to_datetime", format=format)\n\n def to_date(self, format: str | None) -> EagerExprT:\n return self.compliant._reuse_series_namespace("str", "to_date", format=format)\n\n def to_lowercase(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("str", "to_lowercase")\n\n def to_uppercase(self) -> EagerExprT:\n return self.compliant._reuse_series_namespace("str", "to_uppercase")\n\n def zfill(self, width: int) -> EagerExprT:\n return self.compliant._reuse_series_namespace("str", "zfill", width=width)\n\n\nclass EagerExprStructNamespace(\n EagerExprNamespace[EagerExprT], StructNamespace[EagerExprT], Generic[EagerExprT]\n):\n def field(self, name: str) -> EagerExprT:\n return self.compliant._reuse_series_namespace("struct", "field", name=name).alias(\n name\n )\n
.venv\Lib\site-packages\narwhals\_compliant\expr.py
expr.py
Python
42,464
0.95
0.268152
0.027439
node-utils
312
2023-07-14T03:53:07.780138
Apache-2.0
false
73c520ad02fdcb9ca551c4eaffac7b77
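Note: below is a minimal usage sketch of the name-namespace remapping implemented in expr.py above, where `CompliantExprNameNamespace._alias_output_names` lifts a per-name function onto every output name of an expression. It assumes narwhals and pandas are installed; the frame contents and the "mean_" prefix are illustrative.

import narwhals as nw
import pandas as pd

df = nw.from_native(pd.DataFrame({"a": [1, 2], "b": [3, 4]}))

# `name.prefix` routes through CompliantExprNameNamespace.prefix above:
# each output name `n` of the multi-output expression becomes f"mean_{n}".
out = df.select(nw.col("a", "b").mean().name.prefix("mean_"))
print(out.to_native().columns.tolist())  # ['mean_a', 'mean_b']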
from __future__ import annotations\n\nimport re\nfrom typing import TYPE_CHECKING, Any, Callable, ClassVar, Literal, TypeVar\n\nfrom narwhals._compliant.typing import (\n CompliantDataFrameAny,\n CompliantDataFrameT_co,\n CompliantExprT_contra,\n CompliantFrameT_co,\n CompliantLazyFrameAny,\n CompliantLazyFrameT_co,\n DepthTrackingExprAny,\n DepthTrackingExprT_contra,\n EagerExprT_contra,\n LazyExprT_contra,\n NativeExprT_co,\n)\nfrom narwhals._typing_compat import Protocol38\nfrom narwhals._utils import is_sequence_of\n\nif TYPE_CHECKING:\n from collections.abc import Iterable, Iterator, Mapping, Sequence\n\n from typing_extensions import TypeAlias\n\n _SameFrameT = TypeVar("_SameFrameT", CompliantDataFrameAny, CompliantLazyFrameAny)\n\n\n__all__ = [\n "CompliantGroupBy",\n "DepthTrackingGroupBy",\n "EagerGroupBy",\n "LazyGroupBy",\n "NarwhalsAggregation",\n]\n\nNativeAggregationT_co = TypeVar(\n "NativeAggregationT_co", bound="str | Callable[..., Any]", covariant=True\n)\nNarwhalsAggregation: TypeAlias = Literal[\n "sum", "mean", "median", "max", "min", "std", "var", "len", "n_unique", "count"\n]\n\n\n_RE_LEAF_NAME: re.Pattern[str] = re.compile(r"(\w+->)")\n\n\nclass CompliantGroupBy(Protocol38[CompliantFrameT_co, CompliantExprT_contra]):\n _compliant_frame: Any\n\n @property\n def compliant(self) -> CompliantFrameT_co:\n return self._compliant_frame # type: ignore[no-any-return]\n\n def __init__(\n self,\n compliant_frame: CompliantFrameT_co,\n keys: Sequence[CompliantExprT_contra] | Sequence[str],\n /,\n *,\n drop_null_keys: bool,\n ) -> None: ...\n\n def agg(self, *exprs: CompliantExprT_contra) -> CompliantFrameT_co: ...\n\n\nclass DataFrameGroupBy(\n CompliantGroupBy[CompliantDataFrameT_co, CompliantExprT_contra],\n Protocol38[CompliantDataFrameT_co, CompliantExprT_contra],\n):\n def __iter__(self) -> Iterator[tuple[Any, CompliantDataFrameT_co]]: ...\n\n\nclass ParseKeysGroupBy(\n CompliantGroupBy[CompliantFrameT_co, CompliantExprT_contra],\n Protocol38[CompliantFrameT_co, CompliantExprT_contra],\n):\n def _parse_keys(\n self,\n compliant_frame: _SameFrameT,\n keys: Sequence[CompliantExprT_contra] | Sequence[str],\n ) -> tuple[_SameFrameT, list[str], list[str]]:\n if is_sequence_of(keys, str):\n keys_str = list(keys)\n return compliant_frame, keys_str, keys_str.copy()\n else:\n return self._parse_expr_keys(compliant_frame, keys=keys)\n\n @staticmethod\n def _parse_expr_keys(\n compliant_frame: _SameFrameT, keys: Sequence[CompliantExprT_contra]\n ) -> tuple[_SameFrameT, list[str], list[str]]:\n """Parses key expressions to set up `.agg` operation with correct information.\n\n Since keys are expressions, it's possible to alias any such key to match\n other dataframe column names.\n\n In order to match polars behavior and not overwrite columns when evaluating keys:\n\n - We evaluate what the output key names should be, in order to remap temporary column\n names to the expected ones, and to exclude those from unnamed expressions in\n `.agg(...)` context (see https://github.com/narwhals-dev/narwhals/pull/2325#issuecomment-2800004520)\n - Create temporary names for evaluated key expressions that are guaranteed to have\n no overlap with any existing column name.\n - Add these temporary columns to the compliant dataframe.\n """\n tmp_name_length = max(len(str(c)) for c in compliant_frame.columns) + 1\n\n def _temporary_name(key: str) -> str:\n # 5 is the length of `__tmp`\n key_str = str(key) # pandas allows non-string column names :sob:\n return f"_{key_str}_tmp{'_' * (tmp_name_length - 
len(key_str) - 5)}"\n\n output_names = compliant_frame._evaluate_aliases(*keys)\n\n safe_keys = [\n # multi-output expression cannot have duplicate names, hence it's safe to suffix\n key.name.map(_temporary_name)\n if (metadata := key._metadata) and metadata.expansion_kind.is_multi_output()\n # otherwise it's single named and we can use Expr.alias\n else key.alias(_temporary_name(new_name))\n for key, new_name in zip(keys, output_names)\n ]\n return (\n compliant_frame.with_columns(*safe_keys),\n compliant_frame._evaluate_aliases(*safe_keys),\n output_names,\n )\n\n\nclass DepthTrackingGroupBy(\n ParseKeysGroupBy[CompliantFrameT_co, DepthTrackingExprT_contra],\n Protocol38[CompliantFrameT_co, DepthTrackingExprT_contra, NativeAggregationT_co],\n):\n """`CompliantGroupBy` variant that deals with `Eager` and other backends that utilize `CompliantExpr._depth`."""\n\n _REMAP_AGGS: ClassVar[Mapping[NarwhalsAggregation, Any]]\n """Mapping from `narwhals` to native representation.\n\n Note:\n - `Dask` *may* return a `Callable` instead of a `str` referring to one.\n """\n\n def _ensure_all_simple(self, exprs: Sequence[DepthTrackingExprT_contra]) -> None:\n for expr in exprs:\n if not self._is_simple(expr):\n name = self.compliant._implementation.name.lower()\n msg = (\n f"Non-trivial complex aggregation found.\n\n"\n f"Hint: you were probably trying to apply a non-elementary aggregation with a "\n f"{name!r} table.\n"\n "Please rewrite your query such that group-by aggregations "\n "are elementary. For example, instead of:\n\n"\n " df.group_by('a').agg(nw.col('b').round(2).mean())\n\n"\n "use:\n\n"\n " df.with_columns(nw.col('b').round(2)).group_by('a').agg(nw.col('b').mean())\n\n"\n )\n raise ValueError(msg)\n\n @classmethod\n def _is_simple(cls, expr: DepthTrackingExprAny, /) -> bool:\n """Return `True` if we can efficiently use `expr` in a native `group_by` context."""\n return expr._is_elementary() and cls._leaf_name(expr) in cls._REMAP_AGGS\n\n @classmethod\n def _remap_expr_name(\n cls, name: NarwhalsAggregation | Any, /\n ) -> NativeAggregationT_co:\n """Replace `name` with some native representation.\n\n Arguments:\n name: Name of a `nw.Expr` aggregation method.\n\n Returns:\n A native-compatible representation.\n """\n return cls._REMAP_AGGS.get(name, name)\n\n @classmethod\n def _leaf_name(cls, expr: DepthTrackingExprAny, /) -> NarwhalsAggregation | Any:\n """Return the last function name in the chain defined by `expr`."""\n return _RE_LEAF_NAME.sub("", expr._function_name)\n\n\nclass EagerGroupBy(\n DepthTrackingGroupBy[\n CompliantDataFrameT_co, EagerExprT_contra, NativeAggregationT_co\n ],\n DataFrameGroupBy[CompliantDataFrameT_co, EagerExprT_contra],\n Protocol38[CompliantDataFrameT_co, EagerExprT_contra, NativeAggregationT_co],\n): ...\n\n\nclass LazyGroupBy(\n ParseKeysGroupBy[CompliantLazyFrameT_co, LazyExprT_contra],\n CompliantGroupBy[CompliantLazyFrameT_co, LazyExprT_contra],\n Protocol38[CompliantLazyFrameT_co, LazyExprT_contra, NativeExprT_co],\n):\n _keys: list[str]\n _output_key_names: list[str]\n\n def _evaluate_expr(self, expr: LazyExprT_contra, /) -> Iterator[NativeExprT_co]:\n output_names = expr._evaluate_output_names(self.compliant)\n aliases = (\n expr._alias_output_names(output_names)\n if expr._alias_output_names\n else output_names\n )\n native_exprs = expr(self.compliant)\n if expr._is_multi_output_unnamed():\n exclude = {*self._keys, *self._output_key_names}\n for native_expr, name, alias in zip(native_exprs, output_names, aliases):\n if name not in exclude:\n yield
expr._alias_native(native_expr, alias)\n else:\n for native_expr, alias in zip(native_exprs, aliases):\n yield expr._alias_native(native_expr, alias)\n\n def _evaluate_exprs(\n self, exprs: Iterable[LazyExprT_contra], /\n ) -> Iterator[NativeExprT_co]:\n for expr in exprs:\n yield from self._evaluate_expr(expr)\n
.venv\Lib\site-packages\narwhals\_compliant\group_by.py
group_by.py
Python
8,303
0.95
0.151786
0.021978
node-utils
693
2023-08-20T23:43:31.922962
GPL-3.0
false
1d976925d6c26e35b0dd93f15d5218f8
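Note: the error hint in `DepthTrackingGroupBy._ensure_all_simple` above suggests a rewrite, and the following is a minimal runnable sketch of it (assuming narwhals and pandas are installed; data values are illustrative). `round(2).mean()` is not simple in the sense of `_is_simple` — the aggregation leaf is chained after another function, so `_is_elementary()` fails — hence the elementwise step is moved into `with_columns` first.

import narwhals as nw
import pandas as pd

df = nw.from_native(pd.DataFrame({"a": [1, 1, 2], "b": [1.234, 5.678, 9.012]}))

# Instead of: df.group_by("a").agg(nw.col("b").round(2).mean())
out = (
    df.with_columns(nw.col("b").round(2))  # elementwise step first
    .group_by("a")
    .agg(nw.col("b").mean())  # elementary aggregation, safe for native group-by
)
print(out.sort("a").to_native())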