koichi12 commited on
Commit
114999c
·
verified ·
1 Parent(s): f6931f8

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. .venv/lib/python3.11/site-packages/click-8.1.8.dist-info/INSTALLER +1 -0
  3. .venv/lib/python3.11/site-packages/click-8.1.8.dist-info/LICENSE.txt +28 -0
  4. .venv/lib/python3.11/site-packages/click-8.1.8.dist-info/METADATA +74 -0
  5. .venv/lib/python3.11/site-packages/click-8.1.8.dist-info/RECORD +38 -0
  6. .venv/lib/python3.11/site-packages/click-8.1.8.dist-info/WHEEL +4 -0
  7. .venv/lib/python3.11/site-packages/filelock-3.17.0.dist-info/INSTALLER +1 -0
  8. .venv/lib/python3.11/site-packages/filelock-3.17.0.dist-info/METADATA +58 -0
  9. .venv/lib/python3.11/site-packages/filelock-3.17.0.dist-info/RECORD +24 -0
  10. .venv/lib/python3.11/site-packages/filelock-3.17.0.dist-info/WHEEL +4 -0
  11. .venv/lib/python3.11/site-packages/filelock-3.17.0.dist-info/licenses/LICENSE +24 -0
  12. .venv/lib/python3.11/site-packages/numpy/__init__.cython-30.pxd +1050 -0
  13. .venv/lib/python3.11/site-packages/numpy/_distributor_init.py +15 -0
  14. .venv/lib/python3.11/site-packages/numpy/_pytesttester.py +207 -0
  15. .venv/lib/python3.11/site-packages/numpy/_pytesttester.pyi +18 -0
  16. .venv/lib/python3.11/site-packages/numpy/conftest.py +138 -0
  17. .venv/lib/python3.11/site-packages/numpy/dtypes.py +77 -0
  18. .venv/lib/python3.11/site-packages/numpy/dtypes.pyi +43 -0
  19. .venv/lib/python3.11/site-packages/numpy/exceptions.py +231 -0
  20. .venv/lib/python3.11/site-packages/numpy/py.typed +0 -0
  21. .venv/lib/python3.11/site-packages/numpy/version.py +8 -0
  22. .venv/lib/python3.11/site-packages/psutil/__init__.py +2486 -0
  23. .venv/lib/python3.11/site-packages/psutil/__pycache__/__init__.cpython-311.pyc +0 -0
  24. .venv/lib/python3.11/site-packages/psutil/__pycache__/_common.cpython-311.pyc +0 -0
  25. .venv/lib/python3.11/site-packages/psutil/__pycache__/_compat.cpython-311.pyc +0 -0
  26. .venv/lib/python3.11/site-packages/psutil/__pycache__/_psaix.cpython-311.pyc +0 -0
  27. .venv/lib/python3.11/site-packages/psutil/__pycache__/_psbsd.cpython-311.pyc +0 -0
  28. .venv/lib/python3.11/site-packages/psutil/__pycache__/_psosx.cpython-311.pyc +0 -0
  29. .venv/lib/python3.11/site-packages/psutil/__pycache__/_psposix.cpython-311.pyc +0 -0
  30. .venv/lib/python3.11/site-packages/psutil/__pycache__/_pssunos.cpython-311.pyc +0 -0
  31. .venv/lib/python3.11/site-packages/psutil/__pycache__/_pswindows.cpython-311.pyc +0 -0
  32. .venv/lib/python3.11/site-packages/psutil/_psposix.py +243 -0
  33. .venv/lib/python3.11/site-packages/psutil/tests/__init__.py +2113 -0
  34. .venv/lib/python3.11/site-packages/psutil/tests/__main__.py +12 -0
  35. .venv/lib/python3.11/site-packages/psutil/tests/__pycache__/__main__.cpython-311.pyc +0 -0
  36. .venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_aix.cpython-311.pyc +0 -0
  37. .venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_bsd.cpython-311.pyc +0 -0
  38. .venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_connections.cpython-311.pyc +0 -0
  39. .venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_contracts.cpython-311.pyc +0 -0
  40. .venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_memleaks.cpython-311.pyc +0 -0
  41. .venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_misc.cpython-311.pyc +0 -0
  42. .venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_osx.cpython-311.pyc +0 -0
  43. .venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_posix.cpython-311.pyc +0 -0
  44. .venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_process_all.cpython-311.pyc +0 -0
  45. .venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_sunos.cpython-311.pyc +0 -0
  46. .venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_system.cpython-311.pyc +0 -0
  47. .venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_testutils.cpython-311.pyc +0 -0
  48. .venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_unicode.cpython-311.pyc +0 -0
  49. .venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_windows.cpython-311.pyc +0 -0
  50. .venv/lib/python3.11/site-packages/psutil/tests/test_aix.py +142 -0
.gitattributes CHANGED
@@ -347,3 +347,4 @@ tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia/cudnn/lib/
347
  .venv/lib/python3.11/site-packages/torchvision/image.so filter=lfs diff=lfs merge=lfs -text
348
  .venv/lib/python3.11/site-packages/torchvision/_C.so filter=lfs diff=lfs merge=lfs -text
349
  .venv/lib/python3.11/site-packages/torchvision/transforms/__pycache__/transforms.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
 
 
347
  .venv/lib/python3.11/site-packages/torchvision/image.so filter=lfs diff=lfs merge=lfs -text
348
  .venv/lib/python3.11/site-packages/torchvision/_C.so filter=lfs diff=lfs merge=lfs -text
349
  .venv/lib/python3.11/site-packages/torchvision/transforms/__pycache__/transforms.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
350
+ .venv/lib/python3.11/site-packages/torchvision/transforms/v2/functional/__pycache__/_geometry.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
.venv/lib/python3.11/site-packages/click-8.1.8.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
.venv/lib/python3.11/site-packages/click-8.1.8.dist-info/LICENSE.txt ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright 2014 Pallets
2
+
3
+ Redistribution and use in source and binary forms, with or without
4
+ modification, are permitted provided that the following conditions are
5
+ met:
6
+
7
+ 1. Redistributions of source code must retain the above copyright
8
+ notice, this list of conditions and the following disclaimer.
9
+
10
+ 2. Redistributions in binary form must reproduce the above copyright
11
+ notice, this list of conditions and the following disclaimer in the
12
+ documentation and/or other materials provided with the distribution.
13
+
14
+ 3. Neither the name of the copyright holder nor the names of its
15
+ contributors may be used to endorse or promote products derived from
16
+ this software without specific prior written permission.
17
+
18
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
21
+ PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
24
+ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.venv/lib/python3.11/site-packages/click-8.1.8.dist-info/METADATA ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.3
2
+ Name: click
3
+ Version: 8.1.8
4
+ Summary: Composable command line interface toolkit
5
+ Maintainer-email: Pallets <contact@palletsprojects.com>
6
+ Requires-Python: >=3.7
7
+ Description-Content-Type: text/markdown
8
+ Classifier: Development Status :: 5 - Production/Stable
9
+ Classifier: Intended Audience :: Developers
10
+ Classifier: License :: OSI Approved :: BSD License
11
+ Classifier: Operating System :: OS Independent
12
+ Classifier: Programming Language :: Python
13
+ Classifier: Typing :: Typed
14
+ Requires-Dist: colorama; platform_system == 'Windows'
15
+ Requires-Dist: importlib-metadata; python_version < '3.8'
16
+ Project-URL: Changes, https://click.palletsprojects.com/changes/
17
+ Project-URL: Chat, https://discord.gg/pallets
18
+ Project-URL: Documentation, https://click.palletsprojects.com/
19
+ Project-URL: Donate, https://palletsprojects.com/donate
20
+ Project-URL: Source, https://github.com/pallets/click/
21
+
22
+ # $ click_
23
+
24
+ Click is a Python package for creating beautiful command line interfaces
25
+ in a composable way with as little code as necessary. It's the "Command
26
+ Line Interface Creation Kit". It's highly configurable but comes with
27
+ sensible defaults out of the box.
28
+
29
+ It aims to make the process of writing command line tools quick and fun
30
+ while also preventing any frustration caused by the inability to
31
+ implement an intended CLI API.
32
+
33
+ Click in three points:
34
+
35
+ - Arbitrary nesting of commands
36
+ - Automatic help page generation
37
+ - Supports lazy loading of subcommands at runtime
38
+
39
+
40
+ ## A Simple Example
41
+
42
+ ```python
43
+ import click
44
+
45
+ @click.command()
46
+ @click.option("--count", default=1, help="Number of greetings.")
47
+ @click.option("--name", prompt="Your name", help="The person to greet.")
48
+ def hello(count, name):
49
+ """Simple program that greets NAME for a total of COUNT times."""
50
+ for _ in range(count):
51
+ click.echo(f"Hello, {name}!")
52
+
53
+ if __name__ == '__main__':
54
+ hello()
55
+ ```
56
+
57
+ ```
58
+ $ python hello.py --count=3
59
+ Your name: Click
60
+ Hello, Click!
61
+ Hello, Click!
62
+ Hello, Click!
63
+ ```
64
+
65
+
66
+ ## Donate
67
+
68
+ The Pallets organization develops and supports Click and other popular
69
+ packages. In order to grow the community of contributors and users, and
70
+ allow the maintainers to devote more time to the projects, [please
71
+ donate today][].
72
+
73
+ [please donate today]: https://palletsprojects.com/donate
74
+
.venv/lib/python3.11/site-packages/click-8.1.8.dist-info/RECORD ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ click-8.1.8.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ click-8.1.8.dist-info/LICENSE.txt,sha256=morRBqOU6FO_4h9C9OctWSgZoigF2ZG18ydQKSkrZY0,1475
3
+ click-8.1.8.dist-info/METADATA,sha256=WJtQ6uGS2ybLfvUE4vC0XIhIBr4yFGwjrMBR2fiCQ-Q,2263
4
+ click-8.1.8.dist-info/RECORD,,
5
+ click-8.1.8.dist-info/WHEEL,sha256=CpUCUxeHQbRN5UGRQHYRJorO5Af-Qy_fHMctcQ8DSGI,82
6
+ click/__init__.py,sha256=j1DJeCbga4ribkv5uyvIAzI0oFN13fW9mevDKShFelo,3188
7
+ click/__pycache__/__init__.cpython-311.pyc,,
8
+ click/__pycache__/_compat.cpython-311.pyc,,
9
+ click/__pycache__/_termui_impl.cpython-311.pyc,,
10
+ click/__pycache__/_textwrap.cpython-311.pyc,,
11
+ click/__pycache__/_winconsole.cpython-311.pyc,,
12
+ click/__pycache__/core.cpython-311.pyc,,
13
+ click/__pycache__/decorators.cpython-311.pyc,,
14
+ click/__pycache__/exceptions.cpython-311.pyc,,
15
+ click/__pycache__/formatting.cpython-311.pyc,,
16
+ click/__pycache__/globals.cpython-311.pyc,,
17
+ click/__pycache__/parser.cpython-311.pyc,,
18
+ click/__pycache__/shell_completion.cpython-311.pyc,,
19
+ click/__pycache__/termui.cpython-311.pyc,,
20
+ click/__pycache__/testing.cpython-311.pyc,,
21
+ click/__pycache__/types.cpython-311.pyc,,
22
+ click/__pycache__/utils.cpython-311.pyc,,
23
+ click/_compat.py,sha256=IGKh_J5QdfKELitnRfTGHneejWxoCw_NX9tfMbdcg3w,18730
24
+ click/_termui_impl.py,sha256=a5z7I9gOFeMmu7Gb6_RPyQ8GPuVP1EeblixcWSPSQPk,24783
25
+ click/_textwrap.py,sha256=10fQ64OcBUMuK7mFvh8363_uoOxPlRItZBmKzRJDgoY,1353
26
+ click/_winconsole.py,sha256=5ju3jQkcZD0W27WEMGqmEP4y_crUVzPCqsX_FYb7BO0,7860
27
+ click/core.py,sha256=Q1nEVdctZwvIPOlt4vfHko0TYnHCeE40UEEul8Wpyvs,114748
28
+ click/decorators.py,sha256=7t6F-QWowtLh6F_6l-4YV4Y4yNTcqFQEu9i37zIz68s,18925
29
+ click/exceptions.py,sha256=V7zDT6emqJ8iNl0kF1P5kpFmLMWQ1T1L7aNNKM4YR0w,9600
30
+ click/formatting.py,sha256=Frf0-5W33-loyY_i9qrwXR8-STnW3m5gvyxLVUdyxyk,9706
31
+ click/globals.py,sha256=cuJ6Bbo073lgEEmhjr394PeM-QFmXM-Ci-wmfsd7H5g,1954
32
+ click/parser.py,sha256=h4sndcpF5OHrZQN8vD8IWb5OByvW7ABbhRToxovrqS8,19067
33
+ click/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
34
+ click/shell_completion.py,sha256=TR0dXEGcvWb9Eo3aaQEXGhnvNS3FF4H4QcuLnvAvYo4,18636
35
+ click/termui.py,sha256=dLxiS70UOvIYBda_nEEZaPAFOVDVmRs1sEPMuLDowQo,28310
36
+ click/testing.py,sha256=3RA8anCf7TZ8-5RAF5it2Te-aWXBAL5VLasQnMiC2ZQ,16282
37
+ click/types.py,sha256=BD5Qqq4h-8kawBmOIzJlmq4xzThAf4wCvaOLZSBDNx0,36422
38
+ click/utils.py,sha256=ce-IrO9ilII76LGkU354pOdHbepM8UftfNH7SfMU_28,20330
.venv/lib/python3.11/site-packages/click-8.1.8.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: flit 3.10.1
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
.venv/lib/python3.11/site-packages/filelock-3.17.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
.venv/lib/python3.11/site-packages/filelock-3.17.0.dist-info/METADATA ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.4
2
+ Name: filelock
3
+ Version: 3.17.0
4
+ Summary: A platform independent file lock.
5
+ Project-URL: Documentation, https://py-filelock.readthedocs.io
6
+ Project-URL: Homepage, https://github.com/tox-dev/py-filelock
7
+ Project-URL: Source, https://github.com/tox-dev/py-filelock
8
+ Project-URL: Tracker, https://github.com/tox-dev/py-filelock/issues
9
+ Maintainer-email: Bernát Gábor <gaborjbernat@gmail.com>
10
+ License-Expression: Unlicense
11
+ License-File: LICENSE
12
+ Keywords: application,cache,directory,log,user
13
+ Classifier: Development Status :: 5 - Production/Stable
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: License :: OSI Approved :: The Unlicense (Unlicense)
16
+ Classifier: Operating System :: OS Independent
17
+ Classifier: Programming Language :: Python
18
+ Classifier: Programming Language :: Python :: 3 :: Only
19
+ Classifier: Programming Language :: Python :: 3.9
20
+ Classifier: Programming Language :: Python :: 3.10
21
+ Classifier: Programming Language :: Python :: 3.11
22
+ Classifier: Programming Language :: Python :: 3.12
23
+ Classifier: Programming Language :: Python :: 3.13
24
+ Classifier: Topic :: Internet
25
+ Classifier: Topic :: Software Development :: Libraries
26
+ Classifier: Topic :: System
27
+ Requires-Python: >=3.9
28
+ Provides-Extra: docs
29
+ Requires-Dist: furo>=2024.8.6; extra == 'docs'
30
+ Requires-Dist: sphinx-autodoc-typehints>=3; extra == 'docs'
31
+ Requires-Dist: sphinx>=8.1.3; extra == 'docs'
32
+ Provides-Extra: testing
33
+ Requires-Dist: covdefaults>=2.3; extra == 'testing'
34
+ Requires-Dist: coverage>=7.6.10; extra == 'testing'
35
+ Requires-Dist: diff-cover>=9.2.1; extra == 'testing'
36
+ Requires-Dist: pytest-asyncio>=0.25.2; extra == 'testing'
37
+ Requires-Dist: pytest-cov>=6; extra == 'testing'
38
+ Requires-Dist: pytest-mock>=3.14; extra == 'testing'
39
+ Requires-Dist: pytest-timeout>=2.3.1; extra == 'testing'
40
+ Requires-Dist: pytest>=8.3.4; extra == 'testing'
41
+ Requires-Dist: virtualenv>=20.28.1; extra == 'testing'
42
+ Provides-Extra: typing
43
+ Requires-Dist: typing-extensions>=4.12.2; (python_version < '3.11') and extra == 'typing'
44
+ Description-Content-Type: text/markdown
45
+
46
+ # filelock
47
+
48
+ [![PyPI](https://img.shields.io/pypi/v/filelock)](https://pypi.org/project/filelock/)
49
+ [![Supported Python
50
+ versions](https://img.shields.io/pypi/pyversions/filelock.svg)](https://pypi.org/project/filelock/)
51
+ [![Documentation
52
+ status](https://readthedocs.org/projects/py-filelock/badge/?version=latest)](https://py-filelock.readthedocs.io/en/latest/?badge=latest)
53
+ [![Code style:
54
+ black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
55
+ [![Downloads](https://static.pepy.tech/badge/filelock/month)](https://pepy.tech/project/filelock)
56
+ [![check](https://github.com/tox-dev/py-filelock/actions/workflows/check.yaml/badge.svg)](https://github.com/tox-dev/py-filelock/actions/workflows/check.yaml)
57
+
58
+ For more information checkout the [official documentation](https://py-filelock.readthedocs.io/en/latest/index.html).
.venv/lib/python3.11/site-packages/filelock-3.17.0.dist-info/RECORD ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filelock-3.17.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ filelock-3.17.0.dist-info/METADATA,sha256=gQqzfk2JJpzrBAdeo31F6ZOm_BPZANfa7AgwMPKlXdM,2897
3
+ filelock-3.17.0.dist-info/RECORD,,
4
+ filelock-3.17.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
5
+ filelock-3.17.0.dist-info/licenses/LICENSE,sha256=iNm062BXnBkew5HKBMFhMFctfu3EqG2qWL8oxuFMm80,1210
6
+ filelock/__init__.py,sha256=_t_-OAGXo_qyPa9lNQ1YnzVYEvSW3I0onPqzpomsVVg,1769
7
+ filelock/__pycache__/__init__.cpython-311.pyc,,
8
+ filelock/__pycache__/_api.cpython-311.pyc,,
9
+ filelock/__pycache__/_error.cpython-311.pyc,,
10
+ filelock/__pycache__/_soft.cpython-311.pyc,,
11
+ filelock/__pycache__/_unix.cpython-311.pyc,,
12
+ filelock/__pycache__/_util.cpython-311.pyc,,
13
+ filelock/__pycache__/_windows.cpython-311.pyc,,
14
+ filelock/__pycache__/asyncio.cpython-311.pyc,,
15
+ filelock/__pycache__/version.cpython-311.pyc,,
16
+ filelock/_api.py,sha256=2aATBeJ3-jtMj5OSm7EE539iNaTBsf13KXtcBMoi8oM,14545
17
+ filelock/_error.py,sha256=-5jMcjTu60YAvAO1UbqDD1GIEjVkwr8xCFwDBtMeYDg,787
18
+ filelock/_soft.py,sha256=haqtc_TB_KJbYv2a8iuEAclKuM4fMG1vTcp28sK919c,1711
19
+ filelock/_unix.py,sha256=w9H8dHeJlVFJMxV9LDUx3MYTnfovPmAHKPiZFQ6va8A,2261
20
+ filelock/_util.py,sha256=QHBoNFIYfbAThhotH3Q8E2acFc84wpG49-T-uu017ZE,1715
21
+ filelock/_windows.py,sha256=8k4XIBl_zZVfGC2gz0kEr8DZBvpNa8wdU9qeM1YrBb8,2179
22
+ filelock/asyncio.py,sha256=xjaIxFAjUI7XTlj58Lx7qnqEG9n3PTHjNr5H7EocogU,12465
23
+ filelock/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
24
+ filelock/version.py,sha256=KdbrTz1mygb-tPODYZu2E4Sk2KYmeTUCHVpQLRpXAXo,413
.venv/lib/python3.11/site-packages/filelock-3.17.0.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.27.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
.venv/lib/python3.11/site-packages/filelock-3.17.0.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ This is free and unencumbered software released into the public domain.
2
+
3
+ Anyone is free to copy, modify, publish, use, compile, sell, or
4
+ distribute this software, either in source code form or as a compiled
5
+ binary, for any purpose, commercial or non-commercial, and by any
6
+ means.
7
+
8
+ In jurisdictions that recognize copyright laws, the author or authors
9
+ of this software dedicate any and all copyright interest in the
10
+ software to the public domain. We make this dedication for the benefit
11
+ of the public at large and to the detriment of our heirs and
12
+ successors. We intend this dedication to be an overt act of
13
+ relinquishment in perpetuity of all present and future rights to this
14
+ software under copyright law.
15
+
16
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19
+ IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22
+ OTHER DEALINGS IN THE SOFTWARE.
23
+
24
+ For more information, please refer to <http://unlicense.org>
.venv/lib/python3.11/site-packages/numpy/__init__.cython-30.pxd ADDED
@@ -0,0 +1,1050 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # NumPy static imports for Cython >= 3.0
2
+ #
3
+ # If any of the PyArray_* functions are called, import_array must be
4
+ # called first. This is done automatically by Cython 3.0+ if a call
5
+ # is not detected inside of the module.
6
+ #
7
+ # Author: Dag Sverre Seljebotn
8
+ #
9
+
10
+ from cpython.ref cimport Py_INCREF
11
+ from cpython.object cimport PyObject, PyTypeObject, PyObject_TypeCheck
12
+ cimport libc.stdio as stdio
13
+
14
+
15
+ cdef extern from *:
16
+ # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython.
17
+ # See https://github.com/cython/cython/issues/3573
18
+ """
19
+ /* Using NumPy API declarations from "numpy/__init__.cython-30.pxd" */
20
+ """
21
+
22
+
23
+ cdef extern from "Python.h":
24
+ ctypedef int Py_intptr_t
25
+
26
+ cdef extern from "numpy/arrayobject.h":
27
+ ctypedef Py_intptr_t npy_intp
28
+ ctypedef size_t npy_uintp
29
+
30
+ cdef enum NPY_TYPES:
31
+ NPY_BOOL
32
+ NPY_BYTE
33
+ NPY_UBYTE
34
+ NPY_SHORT
35
+ NPY_USHORT
36
+ NPY_INT
37
+ NPY_UINT
38
+ NPY_LONG
39
+ NPY_ULONG
40
+ NPY_LONGLONG
41
+ NPY_ULONGLONG
42
+ NPY_FLOAT
43
+ NPY_DOUBLE
44
+ NPY_LONGDOUBLE
45
+ NPY_CFLOAT
46
+ NPY_CDOUBLE
47
+ NPY_CLONGDOUBLE
48
+ NPY_OBJECT
49
+ NPY_STRING
50
+ NPY_UNICODE
51
+ NPY_VOID
52
+ NPY_DATETIME
53
+ NPY_TIMEDELTA
54
+ NPY_NTYPES
55
+ NPY_NOTYPE
56
+
57
+ NPY_INT8
58
+ NPY_INT16
59
+ NPY_INT32
60
+ NPY_INT64
61
+ NPY_INT128
62
+ NPY_INT256
63
+ NPY_UINT8
64
+ NPY_UINT16
65
+ NPY_UINT32
66
+ NPY_UINT64
67
+ NPY_UINT128
68
+ NPY_UINT256
69
+ NPY_FLOAT16
70
+ NPY_FLOAT32
71
+ NPY_FLOAT64
72
+ NPY_FLOAT80
73
+ NPY_FLOAT96
74
+ NPY_FLOAT128
75
+ NPY_FLOAT256
76
+ NPY_COMPLEX32
77
+ NPY_COMPLEX64
78
+ NPY_COMPLEX128
79
+ NPY_COMPLEX160
80
+ NPY_COMPLEX192
81
+ NPY_COMPLEX256
82
+ NPY_COMPLEX512
83
+
84
+ NPY_INTP
85
+
86
+ ctypedef enum NPY_ORDER:
87
+ NPY_ANYORDER
88
+ NPY_CORDER
89
+ NPY_FORTRANORDER
90
+ NPY_KEEPORDER
91
+
92
+ ctypedef enum NPY_CASTING:
93
+ NPY_NO_CASTING
94
+ NPY_EQUIV_CASTING
95
+ NPY_SAFE_CASTING
96
+ NPY_SAME_KIND_CASTING
97
+ NPY_UNSAFE_CASTING
98
+
99
+ ctypedef enum NPY_CLIPMODE:
100
+ NPY_CLIP
101
+ NPY_WRAP
102
+ NPY_RAISE
103
+
104
+ ctypedef enum NPY_SCALARKIND:
105
+ NPY_NOSCALAR,
106
+ NPY_BOOL_SCALAR,
107
+ NPY_INTPOS_SCALAR,
108
+ NPY_INTNEG_SCALAR,
109
+ NPY_FLOAT_SCALAR,
110
+ NPY_COMPLEX_SCALAR,
111
+ NPY_OBJECT_SCALAR
112
+
113
+ ctypedef enum NPY_SORTKIND:
114
+ NPY_QUICKSORT
115
+ NPY_HEAPSORT
116
+ NPY_MERGESORT
117
+
118
+ ctypedef enum NPY_SEARCHSIDE:
119
+ NPY_SEARCHLEFT
120
+ NPY_SEARCHRIGHT
121
+
122
+ enum:
123
+ # DEPRECATED since NumPy 1.7 ! Do not use in new code!
124
+ NPY_C_CONTIGUOUS
125
+ NPY_F_CONTIGUOUS
126
+ NPY_CONTIGUOUS
127
+ NPY_FORTRAN
128
+ NPY_OWNDATA
129
+ NPY_FORCECAST
130
+ NPY_ENSURECOPY
131
+ NPY_ENSUREARRAY
132
+ NPY_ELEMENTSTRIDES
133
+ NPY_ALIGNED
134
+ NPY_NOTSWAPPED
135
+ NPY_WRITEABLE
136
+ NPY_ARR_HAS_DESCR
137
+
138
+ NPY_BEHAVED
139
+ NPY_BEHAVED_NS
140
+ NPY_CARRAY
141
+ NPY_CARRAY_RO
142
+ NPY_FARRAY
143
+ NPY_FARRAY_RO
144
+ NPY_DEFAULT
145
+
146
+ NPY_IN_ARRAY
147
+ NPY_OUT_ARRAY
148
+ NPY_INOUT_ARRAY
149
+ NPY_IN_FARRAY
150
+ NPY_OUT_FARRAY
151
+ NPY_INOUT_FARRAY
152
+
153
+ NPY_UPDATE_ALL
154
+
155
+ enum:
156
+ # Added in NumPy 1.7 to replace the deprecated enums above.
157
+ NPY_ARRAY_C_CONTIGUOUS
158
+ NPY_ARRAY_F_CONTIGUOUS
159
+ NPY_ARRAY_OWNDATA
160
+ NPY_ARRAY_FORCECAST
161
+ NPY_ARRAY_ENSURECOPY
162
+ NPY_ARRAY_ENSUREARRAY
163
+ NPY_ARRAY_ELEMENTSTRIDES
164
+ NPY_ARRAY_ALIGNED
165
+ NPY_ARRAY_NOTSWAPPED
166
+ NPY_ARRAY_WRITEABLE
167
+ NPY_ARRAY_WRITEBACKIFCOPY
168
+
169
+ NPY_ARRAY_BEHAVED
170
+ NPY_ARRAY_BEHAVED_NS
171
+ NPY_ARRAY_CARRAY
172
+ NPY_ARRAY_CARRAY_RO
173
+ NPY_ARRAY_FARRAY
174
+ NPY_ARRAY_FARRAY_RO
175
+ NPY_ARRAY_DEFAULT
176
+
177
+ NPY_ARRAY_IN_ARRAY
178
+ NPY_ARRAY_OUT_ARRAY
179
+ NPY_ARRAY_INOUT_ARRAY
180
+ NPY_ARRAY_IN_FARRAY
181
+ NPY_ARRAY_OUT_FARRAY
182
+ NPY_ARRAY_INOUT_FARRAY
183
+
184
+ NPY_ARRAY_UPDATE_ALL
185
+
186
+ cdef enum:
187
+ NPY_MAXDIMS
188
+
189
+ npy_intp NPY_MAX_ELSIZE
190
+
191
+ ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *)
192
+
193
+ ctypedef struct PyArray_ArrayDescr:
194
+ # shape is a tuple, but Cython doesn't support "tuple shape"
195
+ # inside a non-PyObject declaration, so we have to declare it
196
+ # as just a PyObject*.
197
+ PyObject* shape
198
+
199
+ ctypedef struct PyArray_Descr:
200
+ pass
201
+
202
+ ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]:
203
+ # Use PyDataType_* macros when possible, however there are no macros
204
+ # for accessing some of the fields, so some are defined.
205
+ cdef PyTypeObject* typeobj
206
+ cdef char kind
207
+ cdef char type
208
+ # Numpy sometimes mutates this without warning (e.g. it'll
209
+ # sometimes change "|" to "<" in shared dtype objects on
210
+ # little-endian machines). If this matters to you, use
211
+ # PyArray_IsNativeByteOrder(dtype.byteorder) instead of
212
+ # directly accessing this field.
213
+ cdef char byteorder
214
+ cdef char flags
215
+ cdef int type_num
216
+ cdef int itemsize "elsize"
217
+ cdef int alignment
218
+ cdef object fields
219
+ cdef tuple names
220
+ # Use PyDataType_HASSUBARRAY to test whether this field is
221
+ # valid (the pointer can be NULL). Most users should access
222
+ # this field via the inline helper method PyDataType_SHAPE.
223
+ cdef PyArray_ArrayDescr* subarray
224
+
225
+ ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]:
226
+ # Use through macros
227
+ pass
228
+
229
+ ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]:
230
+ # Use through macros
231
+ pass
232
+
233
+ ctypedef struct PyArrayObject:
234
+ # For use in situations where ndarray can't replace PyArrayObject*,
235
+ # like PyArrayObject**.
236
+ pass
237
+
238
+ ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]:
239
+ cdef __cythonbufferdefaults__ = {"mode": "strided"}
240
+
241
+ # NOTE: no field declarations since direct access is deprecated since NumPy 1.7
242
+ # Instead, we use properties that map to the corresponding C-API functions.
243
+
244
+ @property
245
+ cdef inline PyObject* base(self) nogil:
246
+ """Returns a borrowed reference to the object owning the data/memory.
247
+ """
248
+ return PyArray_BASE(self)
249
+
250
+ @property
251
+ cdef inline dtype descr(self):
252
+ """Returns an owned reference to the dtype of the array.
253
+ """
254
+ return <dtype>PyArray_DESCR(self)
255
+
256
+ @property
257
+ cdef inline int ndim(self) nogil:
258
+ """Returns the number of dimensions in the array.
259
+ """
260
+ return PyArray_NDIM(self)
261
+
262
+ @property
263
+ cdef inline npy_intp *shape(self) nogil:
264
+ """Returns a pointer to the dimensions/shape of the array.
265
+ The number of elements matches the number of dimensions of the array (ndim).
266
+ Can return NULL for 0-dimensional arrays.
267
+ """
268
+ return PyArray_DIMS(self)
269
+
270
+ @property
271
+ cdef inline npy_intp *strides(self) nogil:
272
+ """Returns a pointer to the strides of the array.
273
+ The number of elements matches the number of dimensions of the array (ndim).
274
+ """
275
+ return PyArray_STRIDES(self)
276
+
277
+ @property
278
+ cdef inline npy_intp size(self) nogil:
279
+ """Returns the total size (in number of elements) of the array.
280
+ """
281
+ return PyArray_SIZE(self)
282
+
283
+ @property
284
+ cdef inline char* data(self) nogil:
285
+ """The pointer to the data buffer as a char*.
286
+ This is provided for legacy reasons to avoid direct struct field access.
287
+ For new code that needs this access, you probably want to cast the result
288
+ of `PyArray_DATA()` instead, which returns a 'void*'.
289
+ """
290
+ return PyArray_BYTES(self)
291
+
292
+ ctypedef unsigned char npy_bool
293
+
294
+ ctypedef signed char npy_byte
295
+ ctypedef signed short npy_short
296
+ ctypedef signed int npy_int
297
+ ctypedef signed long npy_long
298
+ ctypedef signed long long npy_longlong
299
+
300
+ ctypedef unsigned char npy_ubyte
301
+ ctypedef unsigned short npy_ushort
302
+ ctypedef unsigned int npy_uint
303
+ ctypedef unsigned long npy_ulong
304
+ ctypedef unsigned long long npy_ulonglong
305
+
306
+ ctypedef float npy_float
307
+ ctypedef double npy_double
308
+ ctypedef long double npy_longdouble
309
+
310
+ ctypedef signed char npy_int8
311
+ ctypedef signed short npy_int16
312
+ ctypedef signed int npy_int32
313
+ ctypedef signed long long npy_int64
314
+ ctypedef signed long long npy_int96
315
+ ctypedef signed long long npy_int128
316
+
317
+ ctypedef unsigned char npy_uint8
318
+ ctypedef unsigned short npy_uint16
319
+ ctypedef unsigned int npy_uint32
320
+ ctypedef unsigned long long npy_uint64
321
+ ctypedef unsigned long long npy_uint96
322
+ ctypedef unsigned long long npy_uint128
323
+
324
+ ctypedef float npy_float32
325
+ ctypedef double npy_float64
326
+ ctypedef long double npy_float80
327
+ ctypedef long double npy_float96
328
+ ctypedef long double npy_float128
329
+
330
+ ctypedef struct npy_cfloat:
331
+ float real
332
+ float imag
333
+
334
+ ctypedef struct npy_cdouble:
335
+ double real
336
+ double imag
337
+
338
+ ctypedef struct npy_clongdouble:
339
+ long double real
340
+ long double imag
341
+
342
+ ctypedef struct npy_complex64:
343
+ float real
344
+ float imag
345
+
346
+ ctypedef struct npy_complex128:
347
+ double real
348
+ double imag
349
+
350
+ ctypedef struct npy_complex160:
351
+ long double real
352
+ long double imag
353
+
354
+ ctypedef struct npy_complex192:
355
+ long double real
356
+ long double imag
357
+
358
+ ctypedef struct npy_complex256:
359
+ long double real
360
+ long double imag
361
+
362
+ ctypedef struct PyArray_Dims:
363
+ npy_intp *ptr
364
+ int len
365
+
366
+ int _import_array() except -1
367
+ # A second definition so _import_array isn't marked as used when we use it here.
368
+ # Do not use - subject to change any time.
369
+ int __pyx_import_array "_import_array"() except -1
370
+
371
+ #
372
+ # Macros from ndarrayobject.h
373
+ #
374
+ bint PyArray_CHKFLAGS(ndarray m, int flags) nogil
375
+ bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil
376
+ bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil
377
+ bint PyArray_ISCONTIGUOUS(ndarray m) nogil
378
+ bint PyArray_ISWRITEABLE(ndarray m) nogil
379
+ bint PyArray_ISALIGNED(ndarray m) nogil
380
+
381
+ int PyArray_NDIM(ndarray) nogil
382
+ bint PyArray_ISONESEGMENT(ndarray) nogil
383
+ bint PyArray_ISFORTRAN(ndarray) nogil
384
+ int PyArray_FORTRANIF(ndarray) nogil
385
+
386
+ void* PyArray_DATA(ndarray) nogil
387
+ char* PyArray_BYTES(ndarray) nogil
388
+
389
+ npy_intp* PyArray_DIMS(ndarray) nogil
390
+ npy_intp* PyArray_STRIDES(ndarray) nogil
391
+ npy_intp PyArray_DIM(ndarray, size_t) nogil
392
+ npy_intp PyArray_STRIDE(ndarray, size_t) nogil
393
+
394
+ PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference!
395
+ PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype!
396
+ PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr.
397
+ int PyArray_FLAGS(ndarray) nogil
398
+ void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7
399
+ void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7
400
+ npy_intp PyArray_ITEMSIZE(ndarray) nogil
401
+ int PyArray_TYPE(ndarray arr) nogil
402
+
403
+ object PyArray_GETITEM(ndarray arr, void *itemptr)
404
+ int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1
405
+
406
+ bint PyTypeNum_ISBOOL(int) nogil
407
+ bint PyTypeNum_ISUNSIGNED(int) nogil
408
+ bint PyTypeNum_ISSIGNED(int) nogil
409
+ bint PyTypeNum_ISINTEGER(int) nogil
410
+ bint PyTypeNum_ISFLOAT(int) nogil
411
+ bint PyTypeNum_ISNUMBER(int) nogil
412
+ bint PyTypeNum_ISSTRING(int) nogil
413
+ bint PyTypeNum_ISCOMPLEX(int) nogil
414
+ bint PyTypeNum_ISPYTHON(int) nogil
415
+ bint PyTypeNum_ISFLEXIBLE(int) nogil
416
+ bint PyTypeNum_ISUSERDEF(int) nogil
417
+ bint PyTypeNum_ISEXTENDED(int) nogil
418
+ bint PyTypeNum_ISOBJECT(int) nogil
419
+
420
+ bint PyDataType_ISBOOL(dtype) nogil
421
+ bint PyDataType_ISUNSIGNED(dtype) nogil
422
+ bint PyDataType_ISSIGNED(dtype) nogil
423
+ bint PyDataType_ISINTEGER(dtype) nogil
424
+ bint PyDataType_ISFLOAT(dtype) nogil
425
+ bint PyDataType_ISNUMBER(dtype) nogil
426
+ bint PyDataType_ISSTRING(dtype) nogil
427
+ bint PyDataType_ISCOMPLEX(dtype) nogil
428
+ bint PyDataType_ISPYTHON(dtype) nogil
429
+ bint PyDataType_ISFLEXIBLE(dtype) nogil
430
+ bint PyDataType_ISUSERDEF(dtype) nogil
431
+ bint PyDataType_ISEXTENDED(dtype) nogil
432
+ bint PyDataType_ISOBJECT(dtype) nogil
433
+ bint PyDataType_HASFIELDS(dtype) nogil
434
+ bint PyDataType_HASSUBARRAY(dtype) nogil
435
+
436
+ bint PyArray_ISBOOL(ndarray) nogil
437
+ bint PyArray_ISUNSIGNED(ndarray) nogil
438
+ bint PyArray_ISSIGNED(ndarray) nogil
439
+ bint PyArray_ISINTEGER(ndarray) nogil
440
+ bint PyArray_ISFLOAT(ndarray) nogil
441
+ bint PyArray_ISNUMBER(ndarray) nogil
442
+ bint PyArray_ISSTRING(ndarray) nogil
443
+ bint PyArray_ISCOMPLEX(ndarray) nogil
444
+ bint PyArray_ISPYTHON(ndarray) nogil
445
+ bint PyArray_ISFLEXIBLE(ndarray) nogil
446
+ bint PyArray_ISUSERDEF(ndarray) nogil
447
+ bint PyArray_ISEXTENDED(ndarray) nogil
448
+ bint PyArray_ISOBJECT(ndarray) nogil
449
+ bint PyArray_HASFIELDS(ndarray) nogil
450
+
451
+ bint PyArray_ISVARIABLE(ndarray) nogil
452
+
453
+ bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil
454
+ bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder
455
+ bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder
456
+ bint PyArray_ISNOTSWAPPED(ndarray) nogil
457
+ bint PyArray_ISBYTESWAPPED(ndarray) nogil
458
+
459
+ bint PyArray_FLAGSWAP(ndarray, int) nogil
460
+
461
+ bint PyArray_ISCARRAY(ndarray) nogil
462
+ bint PyArray_ISCARRAY_RO(ndarray) nogil
463
+ bint PyArray_ISFARRAY(ndarray) nogil
464
+ bint PyArray_ISFARRAY_RO(ndarray) nogil
465
+ bint PyArray_ISBEHAVED(ndarray) nogil
466
+ bint PyArray_ISBEHAVED_RO(ndarray) nogil
467
+
468
+
469
+ bint PyDataType_ISNOTSWAPPED(dtype) nogil
470
+ bint PyDataType_ISBYTESWAPPED(dtype) nogil
471
+
472
+ bint PyArray_DescrCheck(object)
473
+
474
+ bint PyArray_Check(object)
475
+ bint PyArray_CheckExact(object)
476
+
477
+ # Cannot be supported due to out arg:
478
+ # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&)
479
+ # bint PyArray_HasArrayInterface(op, out)
480
+
481
+
482
+ bint PyArray_IsZeroDim(object)
483
+ # Cannot be supported due to ## ## in macro:
484
+ # bint PyArray_IsScalar(object, verbatim work)
485
+ bint PyArray_CheckScalar(object)
486
+ bint PyArray_IsPythonNumber(object)
487
+ bint PyArray_IsPythonScalar(object)
488
+ bint PyArray_IsAnyScalar(object)
489
+ bint PyArray_CheckAnyScalar(object)
490
+
491
+ ndarray PyArray_GETCONTIGUOUS(ndarray)
492
+ bint PyArray_SAMESHAPE(ndarray, ndarray) nogil
493
+ npy_intp PyArray_SIZE(ndarray) nogil
494
+ npy_intp PyArray_NBYTES(ndarray) nogil
495
+
496
+ object PyArray_FROM_O(object)
497
+ object PyArray_FROM_OF(object m, int flags)
498
+ object PyArray_FROM_OT(object m, int type)
499
+ object PyArray_FROM_OTF(object m, int type, int flags)
500
+ object PyArray_FROMANY(object m, int type, int min, int max, int flags)
501
+ object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran)
502
+ object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran)
503
+ void PyArray_FILLWBYTE(object, int val)
504
+ npy_intp PyArray_REFCOUNT(object)
505
+ object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth)
506
+ unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2)
507
+ bint PyArray_EquivByteorders(int b1, int b2) nogil
508
+ object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum)
509
+ object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data)
510
+ #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr)
511
+ object PyArray_ToScalar(void* data, ndarray arr)
512
+
513
+ void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil
514
+ void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil
515
+ void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil
516
+ void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil
517
+
518
+ # Cannot be supported due to out arg
519
+ # void PyArray_DESCR_REPLACE(descr)
520
+
521
+
522
+ object PyArray_Copy(ndarray)
523
+ object PyArray_FromObject(object op, int type, int min_depth, int max_depth)
524
+ object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth)
525
+ object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth)
526
+
527
+ object PyArray_Cast(ndarray mp, int type_num)
528
+ object PyArray_Take(ndarray ap, object items, int axis)
529
+ object PyArray_Put(ndarray ap, object items, object values)
530
+
531
+ void PyArray_ITER_RESET(flatiter it) nogil
532
+ void PyArray_ITER_NEXT(flatiter it) nogil
533
+ void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil
534
+ void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil
535
+ void* PyArray_ITER_DATA(flatiter it) nogil
536
+ bint PyArray_ITER_NOTDONE(flatiter it) nogil
537
+
538
+ void PyArray_MultiIter_RESET(broadcast multi) nogil
539
+ void PyArray_MultiIter_NEXT(broadcast multi) nogil
540
+ void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil
541
+ void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil
542
+ void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil
543
+ void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil
544
+ bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil
545
+
546
+ # Functions from __multiarray_api.h
547
+
548
+ # Functions taking dtype and returning object/ndarray are disabled
549
+ # for now as they steal dtype references. I'm conservative and disable
550
+ # more than is probably needed until it can be checked further.
551
+ int PyArray_SetNumericOps (object) except -1
552
+ object PyArray_GetNumericOps ()
553
+ int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF...
554
+ int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF...
555
+ void PyArray_SetStringFunction (object, int)
556
+ dtype PyArray_DescrFromType (int)
557
+ object PyArray_TypeObjectFromType (int)
558
+ char * PyArray_Zero (ndarray)
559
+ char * PyArray_One (ndarray)
560
+ #object PyArray_CastToType (ndarray, dtype, int)
561
+ int PyArray_CastTo (ndarray, ndarray) except -1
562
+ int PyArray_CastAnyTo (ndarray, ndarray) except -1
563
+ int PyArray_CanCastSafely (int, int) # writes errors
564
+ npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors
565
+ int PyArray_ObjectType (object, int) except 0
566
+ dtype PyArray_DescrFromObject (object, dtype)
567
+ #ndarray* PyArray_ConvertToCommonType (object, int *)
568
+ dtype PyArray_DescrFromScalar (object)
569
+ dtype PyArray_DescrFromTypeObject (object)
570
+ npy_intp PyArray_Size (object)
571
+ #object PyArray_Scalar (void *, dtype, object)
572
+ #object PyArray_FromScalar (object, dtype)
573
+ void PyArray_ScalarAsCtype (object, void *)
574
+ #int PyArray_CastScalarToCtype (object, void *, dtype)
575
+ #int PyArray_CastScalarDirect (object, dtype, void *, int)
576
+ object PyArray_ScalarFromObject (object)
577
+ #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int)
578
+ object PyArray_FromDims (int, int *, int)
579
+ #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *)
580
+ #object PyArray_FromAny (object, dtype, int, int, int, object)
581
+ object PyArray_EnsureArray (object)
582
+ object PyArray_EnsureAnyArray (object)
583
+ #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *)
584
+ #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *)
585
+ #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp)
586
+ #object PyArray_FromIter (object, dtype, npy_intp)
587
+ object PyArray_Return (ndarray)
588
+ #object PyArray_GetField (ndarray, dtype, int)
589
+ #int PyArray_SetField (ndarray, dtype, int, object) except -1
590
+ object PyArray_Byteswap (ndarray, npy_bool)
591
+ object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER)
592
+ int PyArray_MoveInto (ndarray, ndarray) except -1
593
+ int PyArray_CopyInto (ndarray, ndarray) except -1
594
+ int PyArray_CopyAnyInto (ndarray, ndarray) except -1
595
+ int PyArray_CopyObject (ndarray, object) except -1
596
+ object PyArray_NewCopy (ndarray, NPY_ORDER)
597
+ object PyArray_ToList (ndarray)
598
+ object PyArray_ToString (ndarray, NPY_ORDER)
599
+ int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1
600
+ int PyArray_Dump (object, object, int) except -1
601
+ object PyArray_Dumps (object, int)
602
+ int PyArray_ValidType (int) # Cannot error
603
+ void PyArray_UpdateFlags (ndarray, int)
604
+ object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object)
605
+ #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object)
606
+ #dtype PyArray_DescrNew (dtype)
607
+ dtype PyArray_DescrNewFromType (int)
608
+ double PyArray_GetPriority (object, double) # clears errors as of 1.25
609
+ object PyArray_IterNew (object)
610
+ object PyArray_MultiIterNew (int, ...)
611
+
612
+ int PyArray_PyIntAsInt (object) except? -1
613
+ npy_intp PyArray_PyIntAsIntp (object)
614
+ int PyArray_Broadcast (broadcast) except -1
615
+ void PyArray_FillObjectArray (ndarray, object) except *
616
+ int PyArray_FillWithScalar (ndarray, object) except -1
617
+ npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *)
618
+ dtype PyArray_DescrNewByteorder (dtype, char)
619
+ object PyArray_IterAllButAxis (object, int *)
620
+ #object PyArray_CheckFromAny (object, dtype, int, int, int, object)
621
+ #object PyArray_FromArray (ndarray, dtype, int)
622
+ object PyArray_FromInterface (object)
623
+ object PyArray_FromStructInterface (object)
624
+ #object PyArray_FromArrayAttr (object, dtype, object)
625
+ #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*)
626
+ int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND)
627
+ object PyArray_NewFlagsObject (object)
628
+ npy_bool PyArray_CanCastScalar (type, type)
629
+ #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t)
630
+ int PyArray_RemoveSmallest (broadcast) except -1
631
+ int PyArray_ElementStrides (object)
632
+ void PyArray_Item_INCREF (char *, dtype) except *
633
+ void PyArray_Item_XDECREF (char *, dtype) except *
634
+ object PyArray_FieldNames (object)
635
+ object PyArray_Transpose (ndarray, PyArray_Dims *)
636
+ object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE)
637
+ object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE)
638
+ object PyArray_PutMask (ndarray, object, object)
639
+ object PyArray_Repeat (ndarray, object, int)
640
+ object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE)
641
+ int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1
642
+ object PyArray_ArgSort (ndarray, int, NPY_SORTKIND)
643
+ object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *)
644
+ object PyArray_ArgMax (ndarray, int, ndarray)
645
+ object PyArray_ArgMin (ndarray, int, ndarray)
646
+ object PyArray_Reshape (ndarray, object)
647
+ object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER)
648
+ object PyArray_Squeeze (ndarray)
649
+ #object PyArray_View (ndarray, dtype, type)
650
+ object PyArray_SwapAxes (ndarray, int, int)
651
+ object PyArray_Max (ndarray, int, ndarray)
652
+ object PyArray_Min (ndarray, int, ndarray)
653
+ object PyArray_Ptp (ndarray, int, ndarray)
654
+ object PyArray_Mean (ndarray, int, int, ndarray)
655
+ object PyArray_Trace (ndarray, int, int, int, int, ndarray)
656
+ object PyArray_Diagonal (ndarray, int, int, int)
657
+ object PyArray_Clip (ndarray, object, object, ndarray)
658
+ object PyArray_Conjugate (ndarray, ndarray)
659
+ object PyArray_Nonzero (ndarray)
660
+ object PyArray_Std (ndarray, int, int, ndarray, int)
661
+ object PyArray_Sum (ndarray, int, int, ndarray)
662
+ object PyArray_CumSum (ndarray, int, int, ndarray)
663
+ object PyArray_Prod (ndarray, int, int, ndarray)
664
+ object PyArray_CumProd (ndarray, int, int, ndarray)
665
+ object PyArray_All (ndarray, int, ndarray)
666
+ object PyArray_Any (ndarray, int, ndarray)
667
+ object PyArray_Compress (ndarray, object, int, ndarray)
668
+ object PyArray_Flatten (ndarray, NPY_ORDER)
669
+ object PyArray_Ravel (ndarray, NPY_ORDER)
670
+ npy_intp PyArray_MultiplyList (npy_intp *, int)
671
+ int PyArray_MultiplyIntList (int *, int)
672
+ void * PyArray_GetPtr (ndarray, npy_intp*)
673
+ int PyArray_CompareLists (npy_intp *, npy_intp *, int)
674
+ #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype)
675
+ #int PyArray_As1D (object*, char **, int *, int)
676
+ #int PyArray_As2D (object*, char ***, int *, int *, int)
677
+ int PyArray_Free (object, void *)
678
+ #int PyArray_Converter (object, object*)
679
+ int PyArray_IntpFromSequence (object, npy_intp *, int) except -1
680
+ object PyArray_Concatenate (object, int)
681
+ object PyArray_InnerProduct (object, object)
682
+ object PyArray_MatrixProduct (object, object)
683
+ object PyArray_CopyAndTranspose (object)
684
+ object PyArray_Correlate (object, object, int)
685
+ int PyArray_TypestrConvert (int, int)
686
+ #int PyArray_DescrConverter (object, dtype*) except 0
687
+ #int PyArray_DescrConverter2 (object, dtype*) except 0
688
+ int PyArray_IntpConverter (object, PyArray_Dims *) except 0
689
+ #int PyArray_BufferConverter (object, chunk) except 0
690
+ int PyArray_AxisConverter (object, int *) except 0
691
+ int PyArray_BoolConverter (object, npy_bool *) except 0
692
+ int PyArray_ByteorderConverter (object, char *) except 0
693
+ int PyArray_OrderConverter (object, NPY_ORDER *) except 0
694
+ unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors
695
+ #object PyArray_Zeros (int, npy_intp *, dtype, int)
696
+ #object PyArray_Empty (int, npy_intp *, dtype, int)
697
+ object PyArray_Where (object, object, object)
698
+ object PyArray_Arange (double, double, double, int)
699
+ #object PyArray_ArangeObj (object, object, object, dtype)
700
+ int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0
701
+ object PyArray_LexSort (object, int)
702
+ object PyArray_Round (ndarray, int, ndarray)
703
+ unsigned char PyArray_EquivTypenums (int, int)
704
+ int PyArray_RegisterDataType (dtype) except -1
705
+ int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1
706
+ int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1
707
+ #void PyArray_InitArrFuncs (PyArray_ArrFuncs *)
708
+ object PyArray_IntTupleFromIntp (int, npy_intp *)
709
+ int PyArray_TypeNumFromName (char *)
710
+ int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0
711
+ #int PyArray_OutputConverter (object, ndarray*) except 0
712
+ object PyArray_BroadcastToShape (object, npy_intp *, int)
713
+ void _PyArray_SigintHandler (int)
714
+ void* _PyArray_GetSigintBuf ()
715
+ #int PyArray_DescrAlignConverter (object, dtype*) except 0
716
+ #int PyArray_DescrAlignConverter2 (object, dtype*) except 0
717
+ int PyArray_SearchsideConverter (object, void *) except 0
718
+ object PyArray_CheckAxis (ndarray, int *, int)
719
+ npy_intp PyArray_OverflowMultiplyList (npy_intp *, int)
720
+ int PyArray_CompareString (char *, char *, size_t)
721
+ int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead.
722
+
723
+
724
+ # Typedefs that matches the runtime dtype objects in
725
+ # the numpy module.
726
+
727
+ # The ones that are commented out needs an IFDEF function
728
+ # in Cython to enable them only on the right systems.
729
+
730
+ ctypedef npy_int8 int8_t
731
+ ctypedef npy_int16 int16_t
732
+ ctypedef npy_int32 int32_t
733
+ ctypedef npy_int64 int64_t
734
+ #ctypedef npy_int96 int96_t
735
+ #ctypedef npy_int128 int128_t
736
+
737
+ ctypedef npy_uint8 uint8_t
738
+ ctypedef npy_uint16 uint16_t
739
+ ctypedef npy_uint32 uint32_t
740
+ ctypedef npy_uint64 uint64_t
741
+ #ctypedef npy_uint96 uint96_t
742
+ #ctypedef npy_uint128 uint128_t
743
+
744
+ ctypedef npy_float32 float32_t
745
+ ctypedef npy_float64 float64_t
746
+ #ctypedef npy_float80 float80_t
747
+ #ctypedef npy_float128 float128_t
748
+
749
+ ctypedef float complex complex64_t
750
+ ctypedef double complex complex128_t
751
+
752
+ # The int types are mapped a bit surprising --
753
+ # numpy.int corresponds to 'l' and numpy.long to 'q'
754
+ ctypedef npy_long int_t
755
+ ctypedef npy_longlong longlong_t
756
+
757
+ ctypedef npy_ulong uint_t
758
+ ctypedef npy_ulonglong ulonglong_t
759
+
760
+ ctypedef npy_intp intp_t
761
+ ctypedef npy_uintp uintp_t
762
+
763
+ ctypedef npy_double float_t
764
+ ctypedef npy_double double_t
765
+ ctypedef npy_longdouble longdouble_t
766
+
767
+ ctypedef npy_cfloat cfloat_t
768
+ ctypedef npy_cdouble cdouble_t
769
+ ctypedef npy_clongdouble clongdouble_t
770
+
771
+ ctypedef npy_cdouble complex_t
772
+
773
cdef inline object PyArray_MultiIterNew1(a):
    """Create a broadcast (multi) iterator over a single operand."""
    return PyArray_MultiIterNew(1, <void*>a)
775
+
776
cdef inline object PyArray_MultiIterNew2(a, b):
    """Create a broadcast (multi) iterator over two operands."""
    return PyArray_MultiIterNew(2, <void*>a, <void*>b)
778
+
779
cdef inline object PyArray_MultiIterNew3(a, b, c):
    """Create a broadcast (multi) iterator over three operands."""
    return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*>c)
781
+
782
cdef inline object PyArray_MultiIterNew4(a, b, c, d):
    """Create a broadcast (multi) iterator over four operands."""
    return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*>d)
784
+
785
cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
    """Create a broadcast (multi) iterator over five operands."""
    return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*>d, <void*>e)
787
+
788
cdef inline tuple PyDataType_SHAPE(dtype d):
    """Return the subarray shape of *d* as a tuple, or () for scalar dtypes."""
    # Only dtypes with a subarray carry a shape; everything else is 0-d.
    if not PyDataType_HASSUBARRAY(d):
        return ()
    return <tuple>d.subarray.shape
793
+
794
+
795
cdef extern from "numpy/ndarrayobject.h":
    # Type objects for the datetime64/timedelta64 array-scalar types;
    # used below for `isinstance`-style checks via PyObject_TypeCheck.
    PyTypeObject PyTimedeltaArrType_Type
    PyTypeObject PyDatetimeArrType_Type
    # Both datetime64 and timedelta64 store their payload as an int64.
    ctypedef int64_t npy_timedelta
    ctypedef int64_t npy_datetime
800
+
801
cdef extern from "numpy/ndarraytypes.h":
    # Metadata attached to a datetime64/timedelta64 dtype: the time unit
    # plus a multiplier (e.g. "10 seconds" would be base=seconds, num=10 —
    # TODO confirm multiplier semantics against the NumPy C-API docs).
    ctypedef struct PyArray_DatetimeMetaData:
        NPY_DATETIMEUNIT base
        int64_t num
805
+
806
cdef extern from "numpy/arrayscalars.h":

    # Abstract scalar-hierarchy types (numpy.generic and friends).
    # Declared as extension classes so Cython can type-check against them;
    # they carry no fields visible from here.
    ctypedef class numpy.generic [object PyObject]:
        pass
    ctypedef class numpy.number [object PyObject]:
        pass
    ctypedef class numpy.integer [object PyObject]:
        pass
    ctypedef class numpy.signedinteger [object PyObject]:
        pass
    ctypedef class numpy.unsignedinteger [object PyObject]:
        pass
    ctypedef class numpy.inexact [object PyObject]:
        pass
    ctypedef class numpy.floating [object PyObject]:
        pass
    ctypedef class numpy.complexfloating [object PyObject]:
        pass
    ctypedef class numpy.flexible [object PyObject]:
        pass
    ctypedef class numpy.character [object PyObject]:
        pass

    # C layout of a datetime64 scalar: raw int64 value plus unit metadata.
    ctypedef struct PyDatetimeScalarObject:
        # PyObject_HEAD
        npy_datetime obval
        PyArray_DatetimeMetaData obmeta

    # C layout of a timedelta64 scalar, mirroring the datetime64 layout.
    ctypedef struct PyTimedeltaScalarObject:
        # PyObject_HEAD
        npy_timedelta obval
        PyArray_DatetimeMetaData obmeta

    # Time units for datetime64/timedelta64, from years (NPY_FR_Y) down to
    # attoseconds (NPY_FR_as); NPY_FR_GENERIC means "unit-less".
    ctypedef enum NPY_DATETIMEUNIT:
        NPY_FR_Y
        NPY_FR_M
        NPY_FR_W
        NPY_FR_D
        NPY_FR_B
        NPY_FR_h
        NPY_FR_m
        NPY_FR_s
        NPY_FR_ms
        NPY_FR_us
        NPY_FR_ns
        NPY_FR_ps
        NPY_FR_fs
        NPY_FR_as
        NPY_FR_GENERIC
856
+
857
+
858
+ #
859
+ # ufunc API
860
+ #
861
+
862
cdef extern from "numpy/ufuncobject.h":

    # Signature of a ufunc inner loop: per the NumPy ufunc C API the four
    # arguments are (data pointers, dimensions, strides, extra user data).
    ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *)

    # The ufunc object itself, exposed with its C-level fields.
    ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]:
        cdef:
            int nin, nout, nargs
            int identity
            PyUFuncGenericFunction *functions
            void **data
            int ntypes
            int check_return
            char *name
            char *types
            char *doc
            void *ptr
            PyObject *obj
            PyObject *userloops

    # Identity values and floating-point-error flag/shift/mask constants.
    cdef enum:
        PyUFunc_Zero
        PyUFunc_One
        PyUFunc_None
        UFUNC_ERR_IGNORE
        UFUNC_ERR_WARN
        UFUNC_ERR_RAISE
        UFUNC_ERR_CALL
        UFUNC_ERR_PRINT
        UFUNC_ERR_LOG
        UFUNC_MASK_DIVIDEBYZERO
        UFUNC_MASK_OVERFLOW
        UFUNC_MASK_UNDERFLOW
        UFUNC_MASK_INVALID
        UFUNC_SHIFT_DIVIDEBYZERO
        UFUNC_SHIFT_OVERFLOW
        UFUNC_SHIFT_UNDERFLOW
        UFUNC_SHIFT_INVALID
        UFUNC_FPE_DIVIDEBYZERO
        UFUNC_FPE_OVERFLOW
        UFUNC_FPE_UNDERFLOW
        UFUNC_FPE_INVALID
        UFUNC_ERR_DEFAULT
        UFUNC_ERR_DEFAULT2

    # Ufunc construction and loop registration.
    object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *,
          void **, char *, int, int, int, int, char *, char *, int)
    int PyUFunc_RegisterLoopForType(ufunc, int,
                                    PyUFuncGenericFunction, int *, void *) except -1

    # Generic inner loops provided by NumPy; names encode the type signature
    # (e.g. f_f = float->float, dd_d = (double, double)->double, O = object).
    void PyUFunc_f_f_As_d_d \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_d_d \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_f_f \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_g_g \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_F_F_As_D_D \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_F_F \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_D_D \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_G_G \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_O_O \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_ff_f_As_dd_d \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_ff_f \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_dd_d \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_gg_g \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_FF_F_As_DD_D \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_DD_D \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_FF_F \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_GG_G \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_OO_O \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_O_O_method \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_OO_O_method \
         (char **, npy_intp *, npy_intp *, void *)
    void PyUFunc_On_Om \
         (char **, npy_intp *, npy_intp *, void *)

    # Floating-point-error state helpers.
    int PyUFunc_GetPyValues \
        (char *, int *, int *, PyObject **)
    int PyUFunc_checkfperr \
        (int, PyObject *, int *)
    void PyUFunc_clearfperr()
    int PyUFunc_getfperr()
    int PyUFunc_handlefperr \
        (int, PyObject *, int, int *) except -1
    int PyUFunc_ReplaceLoopBySignature \
        (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)
    object PyUFunc_FromFuncAndDataAndSignature \
             (PyUFuncGenericFunction *, void **, char *, int, int, int,
              int, char *, char *, int, char *)

    # Raw umath-API initializer; prefer the import_umath() wrapper below.
    int _import_umath() except -1
967
+
968
cdef inline void set_array_base(ndarray arr, object base):
    """Make *base* the owner of ``arr``'s data.

    PyArray_SetBaseObject steals a reference to *base*, so a reference is
    taken first to keep the net refcount balanced.
    """
    Py_INCREF(base)
    PyArray_SetBaseObject(arr, base)
971
+
972
cdef inline object get_array_base(ndarray arr):
    """Return the object owning ``arr``'s data, or None if ``arr`` owns it."""
    # PyArray_BASE returns a borrowed reference (or NULL).
    owner = PyArray_BASE(arr)
    if owner is NULL:
        return None
    return <object>owner
977
+
978
+ # Versions of the import_* functions which are more suitable for
979
+ # Cython code.
980
cdef inline int import_array() except -1:
    """Initialize the NumPy array C API, raising ImportError on failure.

    Cython-friendly wrapper around the C-level ``_import_array``.
    """
    try:
        __pyx_import_array()
    except Exception:
        raise ImportError("numpy.core.multiarray failed to import")
985
+
986
cdef inline int import_umath() except -1:
    """Initialize the NumPy umath C API, raising ImportError on failure."""
    try:
        _import_umath()
    except Exception:
        raise ImportError("numpy.core.umath failed to import")
991
+
992
cdef inline int import_ufunc() except -1:
    """Initialize the NumPy ufunc C API, raising ImportError on failure.

    The ufunc API is part of umath, so this delegates to ``_import_umath``.
    """
    try:
        _import_umath()
    except Exception:
        raise ImportError("numpy.core.umath failed to import")
997
+
998
+
999
cdef inline bint is_timedelta64_object(object obj):
    """Cython equivalent of ``isinstance(obj, np.timedelta64)``.

    Parameters
    ----------
    obj : object

    Returns
    -------
    bool
    """
    return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type)
1012
+
1013
+
1014
cdef inline bint is_datetime64_object(object obj):
    """Cython equivalent of ``isinstance(obj, np.datetime64)``.

    Parameters
    ----------
    obj : object

    Returns
    -------
    bool
    """
    return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type)
1027
+
1028
+
1029
cdef inline npy_datetime get_datetime64_value(object obj) nogil:
    """Return the raw int64 payload of a scalar ``datetime64``.

    Interpreting the value as a point in time also requires the unit,
    available via `get_datetime64_unit`.
    """
    return (<PyDatetimeScalarObject*>obj).obval
1037
+
1038
+
1039
cdef inline npy_timedelta get_timedelta64_value(object obj) nogil:
    """Return the raw int64 payload of a scalar ``timedelta64``."""
    return (<PyTimedeltaScalarObject*>obj).obval
1044
+
1045
+
1046
cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil:
    """Return the time unit stored in a scalar ``datetime64``'s metadata."""
    return <NPY_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base
.venv/lib/python3.11/site-packages/numpy/_distributor_init.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
""" Distributor init file

Distributors: you can add custom code here to support particular distributions
of numpy.

For example, this is a good place to put any BLAS/LAPACK initialization code.

The numpy standard source distribution will not put code in this file, so you
can safely replace this file with your own version.
"""

# Pull in an optional distributor-supplied module; its absence is the
# normal case for stock numpy, so ImportError is deliberately ignored.
try:
    from . import _distributor_init_local
except ImportError:
    pass
.venv/lib/python3.11/site-packages/numpy/_pytesttester.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Pytest test running.
3
+
4
+ This module implements the ``test()`` function for NumPy modules. The usual
5
+ boiler plate for doing that is to put the following in the module
6
+ ``__init__.py`` file::
7
+
8
+ from numpy._pytesttester import PytestTester
9
+ test = PytestTester(__name__)
10
+ del PytestTester
11
+
12
+
13
+ Warnings filtering and other runtime settings should be dealt with in the
14
+ ``pytest.ini`` file in the numpy repo root. The behavior of the test depends on
15
+ whether or not that file is found as follows:
16
+
17
+ * ``pytest.ini`` is present (develop mode)
18
+ All warnings except those explicitly filtered out are raised as error.
19
+ * ``pytest.ini`` is absent (release mode)
20
+ DeprecationWarnings and PendingDeprecationWarnings are ignored, other
21
+ warnings are passed through.
22
+
23
+ In practice, tests run from the numpy repo are run in develop mode. That
24
+ includes the standard ``python runtests.py`` invocation.
25
+
26
+ This module is imported by every numpy subpackage, so lies at the top level to
27
+ simplify circular import issues. For the same reason, it contains no numpy
28
+ imports at module scope, instead importing numpy within function calls.
29
+ """
30
+ import sys
31
+ import os
32
+
33
+ __all__ = ['PytestTester']
34
+
35
+
36
def _show_numpy_info():
    """Print the NumPy version, relaxed-strides status and CPU features."""
    import numpy as np

    print("NumPy version %s" % np.__version__)
    # f_contiguous on a C-ordered (10, 1) array is only True when relaxed
    # strides checking is enabled, so it doubles as a probe for that option.
    strides_relaxed = np.ones((10, 1), order="C").flags.f_contiguous
    print("NumPy relaxed strides checking option:", strides_relaxed)
    cpu_features = np.lib.utils._opt_info()
    print("NumPy CPU features: ", (cpu_features if cpu_features else 'nothing enabled'))
44
+
45
+
46
class PytestTester:
    """
    Pytest test runner.

    A test function is typically added to a package's __init__.py like so::

      from numpy._pytesttester import PytestTester
      test = PytestTester(__name__).test
      del PytestTester

    Calling this test function finds and runs all tests associated with the
    module and all its sub-modules.

    Attributes
    ----------
    module_name : str
        Full path to the package to test.

    Parameters
    ----------
    module_name : module name
        The name of the module to test.

    Notes
    -----
    Unlike the previous ``nose``-based implementation, this class is not
    publicly exposed as it performs some ``numpy``-specific warning
    suppression.

    """
    def __init__(self, module_name):
        # Dotted name of the package whose tests will be collected.
        self.module_name = module_name

    def __call__(self, label='fast', verbose=1, extra_argv=None,
                 doctests=False, coverage=False, durations=-1, tests=None):
        """
        Run tests for module using pytest.

        Parameters
        ----------
        label : {'fast', 'full'}, optional
            Identifies the tests to run. When set to 'fast', tests decorated
            with `pytest.mark.slow` are skipped, when 'full', the slow marker
            is ignored.
        verbose : int, optional
            Verbosity value for test outputs, in the range 1-3. Default is 1.
        extra_argv : list, optional
            List with any extra arguments to pass to pytests.
        doctests : bool, optional
            .. note:: Not supported
        coverage : bool, optional
            If True, report coverage of NumPy code. Default is False.
            Requires installation of (pip) pytest-cov.
        durations : int, optional
            If < 0, do nothing, If 0, report time of all tests, if > 0,
            report the time of the slowest `timer` tests. Default is -1.
        tests : test or list of tests
            Tests to be executed with pytest '--pyargs'

        Returns
        -------
        result : bool
            Return True on success, false otherwise.

        Notes
        -----
        Each NumPy module exposes `test` in its namespace to run all tests for
        it. For example, to run all tests for numpy.lib:

        >>> np.lib.test() #doctest: +SKIP

        Examples
        --------
        >>> result = np.lib.test() #doctest: +SKIP
        ...
        1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds
        >>> result
        True

        """
        import pytest
        import warnings

        target = sys.modules[self.module_name]
        target_path = os.path.abspath(target.__path__[0])

        # Base arguments: show locals on failure, and a "-q" that offsets
        # the verbosity added below (a "-q" cancels a "-v").
        argv = ["-l", "-q"]

        if sys.version_info < (3, 12):
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                # Filter out distutils cpu warnings (could be localized to
                # distutils tests). ASV has problems with top level import,
                # so fetch module for suppression here.
                from numpy.distutils import cpuinfo

        with warnings.catch_warnings(record=True):
            # Importing numpy.array_api warns at import time; doing it here
            # keeps the warning from breaking pytest collection later.
            import numpy.array_api

        # Silence annoying import messages. Wanted in both develop and
        # release mode.
        argv += [
            "-W ignore:Not importing directory",
            "-W ignore:numpy.dtype size changed",
            "-W ignore:numpy.ufunc size changed",
            "-W ignore::UserWarning:cpuinfo",
        ]

        # When testing matrices, ignore their PendingDeprecationWarnings.
        argv += [
            "-W ignore:the matrix subclass is not",
            "-W ignore:Importing from numpy.matlib is",
        ]

        if doctests:
            argv += ["--doctest-modules"]

        if extra_argv:
            argv += list(extra_argv)

        if verbose > 1:
            argv += ["-" + "v" * (verbose - 1)]

        if coverage:
            argv += ["--cov=" + target_path]

        if label == "fast":
            # not importing at the top level to avoid circular import of module
            from numpy.testing import IS_PYPY
            marker = "not slow and not slow_pypy" if IS_PYPY else "not slow"
            argv += ["-m", marker]
        elif label != "full":
            argv += ["-m", label]

        if durations >= 0:
            argv += ["--durations=%s" % durations]

        selected = [self.module_name] if tests is None else tests
        argv += ["--pyargs"] + list(selected)

        # Show environment info, then run the tests.
        _show_numpy_info()

        try:
            exit_code = pytest.main(argv)
        except SystemExit as exc:
            exit_code = exc.code

        return exit_code == 0
.venv/lib/python3.11/site-packages/numpy/_pytesttester.pyi ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from collections.abc import Iterable
from typing import Literal as L

__all__: list[str]

# Stub for numpy._pytesttester.PytestTester: runs the pytest suite of a
# given numpy submodule via np.test() / np.<submodule>.test().
class PytestTester:
    # Fully qualified name of the module whose tests are run.
    module_name: str
    def __init__(self, module_name: str) -> None: ...
    # Returns True when the pytest run exits with code 0 (all tests passed).
    def __call__(
        self,
        label: L["fast", "full"] = ...,
        verbose: int = ...,
        extra_argv: None | Iterable[str] = ...,
        doctests: L[False] = ...,
        coverage: bool = ...,
        durations: int = ...,
        tests: None | Iterable[str] = ...,
    ) -> bool: ...
.venv/lib/python3.11/site-packages/numpy/conftest.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Pytest configuration and fixtures for the Numpy test suite.
3
+ """
4
+ import os
5
+ import tempfile
6
+
7
+ import hypothesis
8
+ import pytest
9
+ import numpy
10
+
11
+ from numpy.core._multiarray_tests import get_fpu_mode
12
+
13
+
14
+ _old_fpu_mode = None
15
+ _collect_results = {}
16
+
17
+ # Use a known and persistent tmpdir for hypothesis' caches, which
18
+ # can be automatically cleared by the OS or user.
19
+ hypothesis.configuration.set_hypothesis_home_dir(
20
+ os.path.join(tempfile.gettempdir(), ".hypothesis")
21
+ )
22
+
23
+ # We register two custom profiles for Numpy - for details see
24
+ # https://hypothesis.readthedocs.io/en/latest/settings.html
25
+ # The first is designed for our own CI runs; the latter also
26
+ # forces determinism and is designed for use via np.test()
27
+ hypothesis.settings.register_profile(
28
+ name="numpy-profile", deadline=None, print_blob=True,
29
+ )
30
+ hypothesis.settings.register_profile(
31
+ name="np.test() profile",
32
+ deadline=None, print_blob=True, database=None, derandomize=True,
33
+ suppress_health_check=list(hypothesis.HealthCheck),
34
+ )
35
+ # Note that the default profile is chosen based on the presence
36
+ # of pytest.ini, but can be overridden by passing the
37
+ # --hypothesis-profile=NAME argument to pytest.
38
+ _pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini")
39
+ hypothesis.settings.load_profile(
40
+ "numpy-profile" if os.path.isfile(_pytest_ini) else "np.test() profile"
41
+ )
42
+
43
+ # The experimentalAPI is used in _umath_tests
44
+ os.environ["NUMPY_EXPERIMENTAL_DTYPE_API"] = "1"
45
+
46
def pytest_configure(config):
    """Register NumPy's custom pytest markers so ``-m`` selection works
    without "unknown marker" warnings.
    """
    _marker_lines = (
        "valgrind_error: Tests that are known to error under valgrind.",
        "leaks_references: Tests that are known to leak references.",
        "slow: Tests that are very slow.",
        "slow_pypy: Tests that are very slow on pypy.",
    )
    for line in _marker_lines:
        config.addinivalue_line("markers", line)
55
+
56
+
57
def pytest_addoption(parser):
    """Add the ``--available-memory`` command line option.

    The value is exported to the ``NPY_AVAILABLE_MEM`` environment
    variable by ``pytest_sessionstart`` so that tests requiring large
    amounts of memory can be skipped when not enough is available.
    """
    # Fix: the original help text concatenated "determined" and
    # "automatically." without a separating space.
    parser.addoption("--available-memory", action="store", default=None,
                     help=("Set amount of memory available for running the "
                           "test suite. This can result to tests requiring "
                           "especially large amounts of memory to be skipped. "
                           "Equivalent to setting environment variable "
                           "NPY_AVAILABLE_MEM. Default: determined "
                           "automatically."))
65
+
66
+
67
def pytest_sessionstart(session):
    """Propagate ``--available-memory`` to the NPY_AVAILABLE_MEM env var."""
    mem = session.config.getoption('available_memory')
    if mem is None:
        return
    os.environ['NPY_AVAILABLE_MEM'] = mem
71
+
72
+
73
#FIXME when yield tests are gone.
@pytest.hookimpl()
def pytest_itemcollected(item):
    """
    Check FPU precision mode was not changed during test collection.

    The clumsy way we do it here is mainly necessary because numpy
    still uses yield tests, which can execute code at test collection
    time.
    """
    global _old_fpu_mode

    mode = get_fpu_mode()

    if _old_fpu_mode is None:
        # First collected item: remember the baseline FPU mode.
        _old_fpu_mode = mode
    elif mode != _old_fpu_mode:
        # Collection of this item changed the FPU mode: record it so the
        # check_fpu_mode fixture can fail the test later, then adopt the
        # new mode as the baseline for subsequent items.
        _collect_results[item] = (_old_fpu_mode, mode)
        _old_fpu_mode = mode
92
+
93
+
94
+ @pytest.fixture(scope="function", autouse=True)
95
+ def check_fpu_mode(request):
96
+ """
97
+ Check FPU precision mode was not changed during the test.
98
+ """
99
+ old_mode = get_fpu_mode()
100
+ yield
101
+ new_mode = get_fpu_mode()
102
+
103
+ if old_mode != new_mode:
104
+ raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
105
+ " during the test".format(old_mode, new_mode))
106
+
107
+ collect_result = _collect_results.get(request.node)
108
+ if collect_result is not None:
109
+ old_mode, new_mode = collect_result
110
+ raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
111
+ " when collecting the test".format(old_mode,
112
+ new_mode))
113
+
114
+
115
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
    # Make `np` available to every doctest without an explicit import.
    doctest_namespace['np'] = numpy
118
+
119
@pytest.fixture(autouse=True)
def env_setup(monkeypatch):
    # Pin PYTHONHASHSEED for the duration of each test; this is picked up
    # by any Python subprocesses the test may spawn.
    monkeypatch.setenv('PYTHONHASHSEED', '0')
122
+
123
+
124
@pytest.fixture(params=[True, False])
def weak_promotion(request):
    """
    Fixture to ensure "legacy" promotion state or change it to use the new
    weak promotion (plus warning).  `weak_promotion` should be used as a
    parameter in the function.
    """
    # Remember the active promotion state so it can be restored afterwards.
    state = numpy._get_promotion_state()
    if request.param:
        numpy._set_promotion_state("weak_and_warn")
    else:
        numpy._set_promotion_state("legacy")

    yield request.param
    # Teardown: restore whatever promotion state was active before.
    numpy._set_promotion_state(state)
.venv/lib/python3.11/site-packages/numpy/dtypes.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ DType classes and utility (:mod:`numpy.dtypes`)
3
+ ===============================================
4
+
5
+ This module is home to specific dtypes related functionality and their classes.
6
+ For more general information about dtypes, also see `numpy.dtype` and
7
+ :ref:`arrays.dtypes`.
8
+
9
+ Similar to the builtin ``types`` module, this submodule defines types (classes)
10
+ that are not widely used directly.
11
+
12
+ .. versionadded:: NumPy 1.25
13
+
14
+ The dtypes module is new in NumPy 1.25. Previously DType classes were
15
+ only accessible indirectly.
16
+
17
+
18
+ DType classes
19
+ -------------
20
+
21
+ The following are the classes of the corresponding NumPy dtype instances and
22
+ NumPy scalar types. The classes can be used in ``isinstance`` checks and can
23
+ also be instantiated or used directly. Direct use of these classes is not
24
+ typical, since their scalar counterparts (e.g. ``np.float64``) or strings
25
+ like ``"float64"`` can be used.
26
+
27
+ .. list-table::
28
+ :header-rows: 1
29
+
30
+ * - Group
31
+ - DType class
32
+
33
+ * - Boolean
34
+ - ``BoolDType``
35
+
36
+ * - Bit-sized integers
37
+ - ``Int8DType``, ``UInt8DType``, ``Int16DType``, ``UInt16DType``,
38
+ ``Int32DType``, ``UInt32DType``, ``Int64DType``, ``UInt64DType``
39
+
40
+ * - C-named integers (may be aliases)
41
+ - ``ByteDType``, ``UByteDType``, ``ShortDType``, ``UShortDType``,
42
+ ``IntDType``, ``UIntDType``, ``LongDType``, ``ULongDType``,
43
+ ``LongLongDType``, ``ULongLongDType``
44
+
45
+ * - Floating point
46
+ - ``Float16DType``, ``Float32DType``, ``Float64DType``,
47
+ ``LongDoubleDType``
48
+
49
+ * - Complex
50
+ - ``Complex64DType``, ``Complex128DType``, ``CLongDoubleDType``
51
+
52
+ * - Strings
53
+ - ``BytesDType``, ``BytesDType``
54
+
55
+ * - Times
56
+ - ``DateTime64DType``, ``TimeDelta64DType``
57
+
58
+ * - Others
59
+ - ``ObjectDType``, ``VoidDType``
60
+
61
+ """
62
+
63
+ __all__ = []
64
+
65
+
66
def _add_dtype_helper(DType, alias):
    """Attach ``DType`` (and optionally ``alias``) to the ``numpy.dtypes``
    namespace and record the new name(s) in ``__all__``.

    This avoids channeling the classes through the
    ``numpy.core._multiarray_umath`` namespace or similar.
    """
    from numpy import dtypes

    names = [DType.__name__]
    if alias:
        names.append(alias.removeprefix("numpy.dtypes."))

    for name in names:
        setattr(dtypes, name, DType)
        __all__.append(name)
.venv/lib/python3.11/site-packages/numpy/dtypes.pyi ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+
4
+ __all__: list[str]
5
+
6
+ # Boolean:
7
+ BoolDType = np.dtype[np.bool_]
8
+ # Sized integers:
9
+ Int8DType = np.dtype[np.int8]
10
+ UInt8DType = np.dtype[np.uint8]
11
+ Int16DType = np.dtype[np.int16]
12
+ UInt16DType = np.dtype[np.uint16]
13
+ Int32DType = np.dtype[np.int32]
14
+ UInt32DType = np.dtype[np.uint32]
15
+ Int64DType = np.dtype[np.int64]
16
+ UInt64DType = np.dtype[np.uint64]
17
+ # Standard C-named version/alias:
18
+ ByteDType = np.dtype[np.byte]
19
+ UByteDType = np.dtype[np.ubyte]
20
+ ShortDType = np.dtype[np.short]
21
+ UShortDType = np.dtype[np.ushort]
22
+ IntDType = np.dtype[np.intc]
23
+ UIntDType = np.dtype[np.uintc]
24
+ LongDType = np.dtype[np.int_] # Unfortunately, the correct scalar
25
+ ULongDType = np.dtype[np.uint] # Unfortunately, the correct scalar
26
+ LongLongDType = np.dtype[np.longlong]
27
+ ULongLongDType = np.dtype[np.ulonglong]
28
+ # Floats
29
+ Float16DType = np.dtype[np.float16]
30
+ Float32DType = np.dtype[np.float32]
31
+ Float64DType = np.dtype[np.float64]
32
+ LongDoubleDType = np.dtype[np.longdouble]
33
+ # Complex:
34
+ Complex64DType = np.dtype[np.complex64]
35
+ Complex128DType = np.dtype[np.complex128]
36
+ CLongDoubleDType = np.dtype[np.clongdouble]
37
+ # Others:
38
+ ObjectDType = np.dtype[np.object_]
39
+ BytesDType = np.dtype[np.bytes_]
40
+ StrDType = np.dtype[np.str_]
41
+ VoidDType = np.dtype[np.void]
42
+ DateTime64DType = np.dtype[np.datetime64]
43
+ TimeDelta64DType = np.dtype[np.timedelta64]
.venv/lib/python3.11/site-packages/numpy/exceptions.py ADDED
@@ -0,0 +1,231 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Exceptions and Warnings (:mod:`numpy.exceptions`)
3
+ =================================================
4
+
5
+ General exceptions used by NumPy. Note that some exceptions may be module
6
+ specific, such as linear algebra errors.
7
+
8
+ .. versionadded:: NumPy 1.25
9
+
10
+ The exceptions module is new in NumPy 1.25. Older exceptions remain
11
+ available through the main NumPy namespace for compatibility.
12
+
13
+ .. currentmodule:: numpy.exceptions
14
+
15
+ Warnings
16
+ --------
17
+ .. autosummary::
18
+ :toctree: generated/
19
+
20
+ ComplexWarning Given when converting complex to real.
21
+ VisibleDeprecationWarning Same as a DeprecationWarning, but more visible.
22
+
23
+ Exceptions
24
+ ----------
25
+ .. autosummary::
26
+ :toctree: generated/
27
+
28
+ AxisError Given when an axis was invalid.
29
+ DTypePromotionError Given when no common dtype could be found.
30
+ TooHardError Error specific to `numpy.shares_memory`.
31
+
32
+ """
33
+
34
+
35
+ __all__ = [
36
+ "ComplexWarning", "VisibleDeprecationWarning", "ModuleDeprecationWarning",
37
+ "TooHardError", "AxisError", "DTypePromotionError"]
38
+
39
+
40
+ # Disallow reloading this module so as to preserve the identities of the
41
+ # classes defined here.
42
+ if '_is_loaded' in globals():
43
+ raise RuntimeError('Reloading numpy._globals is not allowed')
44
+ _is_loaded = True
45
+
46
+
47
class ComplexWarning(RuntimeWarning):
    """The warning raised when casting a complex dtype to a real dtype.

    As implemented, casting a complex number to a real one discards its
    imaginary part, which may well not be what the caller intended.
    """
56
+
57
+
58
class ModuleDeprecationWarning(DeprecationWarning):
    """Module deprecation warning.

    .. warning::

        This warning should not be used, since nose testing is not relevant
        anymore.

    The nose tester turned ordinary DeprecationWarnings into test failures,
    which made deprecating whole modules (imported by default) hard.  This
    special deprecation category existed so the nose tester would let such
    warnings pass without failing tests.
    """
72
+
73
+
74
class VisibleDeprecationWarning(UserWarning):
    """Visible deprecation warning.

    Python does not show deprecation warnings by default; derive from this
    class when a very visible warning is helpful, for example because the
    deprecated usage is most likely a bug in user code.
    """
82
+
83
+
84
# Exception used in shares_memory()
class TooHardError(RuntimeError):
    """max_work was exceeded.

    Raised whenever the number of candidate solutions examined exceeds the
    limit given by the ``max_work`` parameter; assigning a finite
    ``max_work`` may therefore have caused the operation to fail.
    """
96
+
97
+
98
class AxisError(ValueError, IndexError):
    """Axis supplied was invalid.

    Raised whenever an ``axis`` parameter is specified that is larger than
    the number of array dimensions.  For compatibility with code written
    against older numpy versions, which raised a mixture of `ValueError`
    and `IndexError` for this situation, this exception subclasses both so
    that ``except ValueError`` and ``except IndexError`` statements
    continue to catch `AxisError`.

    .. versionadded:: 1.13

    Parameters
    ----------
    axis : int or str
        The out of bounds axis or a custom exception message.
        If an axis is provided, then `ndim` should be specified as well.
    ndim : int, optional
        The number of array dimensions.
    msg_prefix : str, optional
        A prefix for the exception message.

    Attributes
    ----------
    axis : int, optional
        The out of bounds axis, or ``None`` if a custom exception message
        was provided.  This is the axis as passed by the user, before any
        normalization to resolve negative indices.

        .. versionadded:: 1.22
    ndim : int, optional
        The number of array dimensions, or ``None`` if a custom exception
        message was provided.

        .. versionadded:: 1.22
    """

    __slots__ = ("axis", "ndim", "_msg")

    def __init__(self, axis, ndim=None, msg_prefix=None):
        if ndim is None and msg_prefix is None:
            # Single-argument form: ``axis`` already holds the complete
            # exception message.
            self.axis, self.ndim, self._msg = None, None, axis
        else:
            self.axis, self.ndim, self._msg = axis, ndim, msg_prefix

    def __str__(self):
        if self.axis is None and self.ndim is None:
            # Custom message supplied to the constructor, returned verbatim.
            return self._msg
        text = (f"axis {self.axis} is out of bounds "
                f"for array of dimension {self.ndim}")
        return text if self._msg is None else f"{self._msg}: {text}"
187
+
188
+
189
class DTypePromotionError(TypeError):
    """Multiple DTypes could not be converted to a common one.

    This exception derives from ``TypeError`` and is raised whenever
    dtypes cannot be converted to a single common one.  That can happen
    because the dtypes belong to different categories/classes (e.g.
    datetimes and complex numbers) or because they are incompatible
    instances of the same class (e.g. structured dtypes whose field
    layouts mismatch).

    Notes
    -----
    Many functions use promotion to find the correct result and
    implementation.  For these functions this error is typically chained
    with a more specific one indicating that no implementation was found
    for the input dtypes.

    Typically promotion should be considered "invalid" between the dtypes
    of two arrays when ``arr1 == arr2`` can safely return all ``False``
    because the dtypes are fundamentally different.
    """
.venv/lib/python3.11/site-packages/numpy/py.typed ADDED
File without changes
.venv/lib/python3.11/site-packages/numpy/version.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+
2
+ version = "1.26.4"
3
+ __version__ = version
4
+ full_version = version
5
+
6
+ git_revision = "9815c16f449e12915ef35a8255329ba26dacd5c0"
7
+ release = 'dev' not in version and '+' not in version
8
+ short_version = version.split("+")[0]
.venv/lib/python3.11/site-packages/psutil/__init__.py ADDED
@@ -0,0 +1,2486 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
4
+ # Use of this source code is governed by a BSD-style license that can be
5
+ # found in the LICENSE file.
6
+
7
+ """psutil is a cross-platform library for retrieving information on
8
+ running processes and system utilization (CPU, memory, disks, network,
9
+ sensors) in Python. Supported platforms:
10
+
11
+ - Linux
12
+ - Windows
13
+ - macOS
14
+ - FreeBSD
15
+ - OpenBSD
16
+ - NetBSD
17
+ - Sun Solaris
18
+ - AIX
19
+
20
+ Works with Python versions 2.7 and 3.6+.
21
+ """
22
+
23
+ from __future__ import division
24
+
25
+ import collections
26
+ import contextlib
27
+ import datetime
28
+ import functools
29
+ import os
30
+ import signal
31
+ import subprocess
32
+ import sys
33
+ import threading
34
+ import time
35
+
36
+
37
+ try:
38
+ import pwd
39
+ except ImportError:
40
+ pwd = None
41
+
42
+ from . import _common
43
+ from ._common import AIX
44
+ from ._common import BSD
45
+ from ._common import CONN_CLOSE
46
+ from ._common import CONN_CLOSE_WAIT
47
+ from ._common import CONN_CLOSING
48
+ from ._common import CONN_ESTABLISHED
49
+ from ._common import CONN_FIN_WAIT1
50
+ from ._common import CONN_FIN_WAIT2
51
+ from ._common import CONN_LAST_ACK
52
+ from ._common import CONN_LISTEN
53
+ from ._common import CONN_NONE
54
+ from ._common import CONN_SYN_RECV
55
+ from ._common import CONN_SYN_SENT
56
+ from ._common import CONN_TIME_WAIT
57
+ from ._common import FREEBSD # NOQA
58
+ from ._common import LINUX
59
+ from ._common import MACOS
60
+ from ._common import NETBSD # NOQA
61
+ from ._common import NIC_DUPLEX_FULL
62
+ from ._common import NIC_DUPLEX_HALF
63
+ from ._common import NIC_DUPLEX_UNKNOWN
64
+ from ._common import OPENBSD # NOQA
65
+ from ._common import OSX # deprecated alias
66
+ from ._common import POSIX # NOQA
67
+ from ._common import POWER_TIME_UNKNOWN
68
+ from ._common import POWER_TIME_UNLIMITED
69
+ from ._common import STATUS_DEAD
70
+ from ._common import STATUS_DISK_SLEEP
71
+ from ._common import STATUS_IDLE
72
+ from ._common import STATUS_LOCKED
73
+ from ._common import STATUS_PARKED
74
+ from ._common import STATUS_RUNNING
75
+ from ._common import STATUS_SLEEPING
76
+ from ._common import STATUS_STOPPED
77
+ from ._common import STATUS_TRACING_STOP
78
+ from ._common import STATUS_WAITING
79
+ from ._common import STATUS_WAKING
80
+ from ._common import STATUS_ZOMBIE
81
+ from ._common import SUNOS
82
+ from ._common import WINDOWS
83
+ from ._common import AccessDenied
84
+ from ._common import Error
85
+ from ._common import NoSuchProcess
86
+ from ._common import TimeoutExpired
87
+ from ._common import ZombieProcess
88
+ from ._common import debug
89
+ from ._common import memoize_when_activated
90
+ from ._common import wrap_numbers as _wrap_numbers
91
+ from ._compat import PY3 as _PY3
92
+ from ._compat import PermissionError
93
+ from ._compat import ProcessLookupError
94
+ from ._compat import SubprocessTimeoutExpired as _SubprocessTimeoutExpired
95
+ from ._compat import long
96
+
97
+
98
# Select the platform-specific backend module; ``_psplatform`` is what all
# the public functions below delegate to.
if LINUX:
    # This is public API and it will be retrieved from _pslinux.py
    # via sys.modules.
    PROCFS_PATH = "/proc"

    from . import _pslinux as _psplatform
    from ._pslinux import IOPRIO_CLASS_BE  # NOQA
    from ._pslinux import IOPRIO_CLASS_IDLE  # NOQA
    from ._pslinux import IOPRIO_CLASS_NONE  # NOQA
    from ._pslinux import IOPRIO_CLASS_RT  # NOQA

elif WINDOWS:
    from . import _pswindows as _psplatform
    from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS  # NOQA
    from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS  # NOQA
    from ._psutil_windows import HIGH_PRIORITY_CLASS  # NOQA
    from ._psutil_windows import IDLE_PRIORITY_CLASS  # NOQA
    from ._psutil_windows import NORMAL_PRIORITY_CLASS  # NOQA
    from ._psutil_windows import REALTIME_PRIORITY_CLASS  # NOQA
    from ._pswindows import CONN_DELETE_TCB  # NOQA
    from ._pswindows import IOPRIO_HIGH  # NOQA
    from ._pswindows import IOPRIO_LOW  # NOQA
    from ._pswindows import IOPRIO_NORMAL  # NOQA
    from ._pswindows import IOPRIO_VERYLOW  # NOQA

elif MACOS:
    from . import _psosx as _psplatform

elif BSD:
    from . import _psbsd as _psplatform

elif SUNOS:
    from . import _pssunos as _psplatform
    from ._pssunos import CONN_BOUND  # NOQA
    from ._pssunos import CONN_IDLE  # NOQA

    # This is public writable API which is read from _pslinux.py and
    # _pssunos.py via sys.modules.
    PROCFS_PATH = "/proc"

elif AIX:
    from . import _psaix as _psplatform

    # This is public API and it will be retrieved from _pslinux.py
    # via sys.modules.
    PROCFS_PATH = "/proc"

else:  # pragma: no cover
    raise NotImplementedError('platform %s is not supported' % sys.platform)
147
+
148
+
149
+ # fmt: off
150
+ __all__ = [
151
+ # exceptions
152
+ "Error", "NoSuchProcess", "ZombieProcess", "AccessDenied",
153
+ "TimeoutExpired",
154
+
155
+ # constants
156
+ "version_info", "__version__",
157
+
158
+ "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
159
+ "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
160
+ "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED",
161
+ "STATUS_PARKED",
162
+
163
+ "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
164
+ "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
165
+ "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
166
+ # "CONN_IDLE", "CONN_BOUND",
167
+
168
+ "AF_LINK",
169
+
170
+ "NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN",
171
+
172
+ "POWER_TIME_UNKNOWN", "POWER_TIME_UNLIMITED",
173
+
174
+ "BSD", "FREEBSD", "LINUX", "NETBSD", "OPENBSD", "MACOS", "OSX", "POSIX",
175
+ "SUNOS", "WINDOWS", "AIX",
176
+
177
+ # "RLIM_INFINITY", "RLIMIT_AS", "RLIMIT_CORE", "RLIMIT_CPU", "RLIMIT_DATA",
178
+ # "RLIMIT_FSIZE", "RLIMIT_LOCKS", "RLIMIT_MEMLOCK", "RLIMIT_NOFILE",
179
+ # "RLIMIT_NPROC", "RLIMIT_RSS", "RLIMIT_STACK", "RLIMIT_MSGQUEUE",
180
+ # "RLIMIT_NICE", "RLIMIT_RTPRIO", "RLIMIT_RTTIME", "RLIMIT_SIGPENDING",
181
+
182
+ # classes
183
+ "Process", "Popen",
184
+
185
+ # functions
186
+ "pid_exists", "pids", "process_iter", "wait_procs", # proc
187
+ "virtual_memory", "swap_memory", # memory
188
+ "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu
189
+ "cpu_stats", # "cpu_freq", "getloadavg"
190
+ "net_io_counters", "net_connections", "net_if_addrs", # network
191
+ "net_if_stats",
192
+ "disk_io_counters", "disk_partitions", "disk_usage", # disk
193
+ # "sensors_temperatures", "sensors_battery", "sensors_fans" # sensors
194
+ "users", "boot_time", # others
195
+ ]
196
+ # fmt: on
197
+
198
+
199
+ __all__.extend(_psplatform.__extra__all__)
200
+
201
# Linux, FreeBSD
if hasattr(_psplatform.Process, "rlimit"):
    # Populate global namespace with RLIM* constants.
    from . import _psutil_posix

    _globals = globals()
    # Pre-bind _name so the `del _name` below cannot raise NameError even
    # if the loop body never runs.
    _name = None
    for _name in dir(_psutil_posix):
        if _name.startswith('RLIM') and _name.isupper():
            _globals[_name] = getattr(_psutil_posix, _name)
            __all__.append(_name)
    del _globals, _name
213
+
214
AF_LINK = _psplatform.AF_LINK

__author__ = "Giampaolo Rodola'"
__version__ = "6.1.1"
version_info = tuple([int(num) for num in __version__.split('.')])

# Prefer a monotonic clock for measuring intervals; fall back to
# time.time() on interpreters lacking time.monotonic (Python 2).
_timer = getattr(time, 'monotonic', time.time)
# Module-level caches, filled lazily elsewhere in this module.
_TOTAL_PHYMEM = None
_LOWEST_PID = None
# Unique marker used to distinguish "no value given" from None.
_SENTINEL = object()
224
+
225
# Sanity check in case the user messed up with psutil installation
# or did something weird with sys.path. In this case we might end
# up importing a python module using a C extension module which
# was compiled for a different version of psutil.
# We want to prevent that by failing sooner rather than later.
# See: https://github.com/giampaolo/psutil/issues/564
if int(__version__.replace('.', '')) != getattr(
    _psplatform.cext, 'version', None
):
    msg = "version conflict: %r C extension " % _psplatform.cext.__file__
    msg += "module was built for another version of psutil"
    if hasattr(_psplatform.cext, 'version'):
        # cext.version is the dot-less form of the version (e.g. 611 for
        # "6.1.1"); re-insert a dot between every digit for display.
        msg += " (%s instead of %s)" % (
            '.'.join([x for x in str(_psplatform.cext.version)]),
            __version__,
        )
    else:
        msg += " (different than %s)" % __version__
    msg += "; you may try to 'pip uninstall psutil', manually remove %s" % (
        getattr(
            _psplatform.cext,
            "__file__",
            "the existing psutil install directory",
        )
    )
    msg += " or clean the virtual env somehow, then reinstall"
    raise ImportError(msg)
252
+
253
+
254
+ # =====================================================================
255
+ # --- Utils
256
+ # =====================================================================
257
+
258
+
259
+ if hasattr(_psplatform, 'ppid_map'):
260
+ # Faster version (Windows and Linux).
261
+ _ppid_map = _psplatform.ppid_map
262
+ else: # pragma: no cover
263
+
264
+ def _ppid_map():
265
+ """Return a {pid: ppid, ...} dict for all running processes in
266
+ one shot. Used to speed up Process.children().
267
+ """
268
+ ret = {}
269
+ for pid in pids():
270
+ try:
271
+ ret[pid] = _psplatform.Process(pid).ppid()
272
+ except (NoSuchProcess, ZombieProcess):
273
+ pass
274
+ return ret
275
+
276
+
277
+ def _pprint_secs(secs):
278
+ """Format seconds in a human readable form."""
279
+ now = time.time()
280
+ secs_ago = int(now - secs)
281
+ fmt = "%H:%M:%S" if secs_ago < 60 * 60 * 24 else "%Y-%m-%d %H:%M:%S"
282
+ return datetime.datetime.fromtimestamp(secs).strftime(fmt)
283
+
284
+
285
+ # =====================================================================
286
+ # --- Process class
287
+ # =====================================================================
288
+
289
+
290
+ class Process(object): # noqa: UP004
291
+ """Represents an OS process with the given PID.
292
+ If PID is omitted current process PID (os.getpid()) is used.
293
+ Raise NoSuchProcess if PID does not exist.
294
+
295
+ Note that most of the methods of this class do not make sure that
296
+ the PID of the process being queried has been reused. That means
297
+ that you may end up retrieving information for another process.
298
+
299
+ The only exceptions for which process identity is pre-emptively
300
+ checked and guaranteed are:
301
+
302
+ - parent()
303
+ - children()
304
+ - nice() (set)
305
+ - ionice() (set)
306
+ - rlimit() (set)
307
+ - cpu_affinity (set)
308
+ - suspend()
309
+ - resume()
310
+ - send_signal()
311
+ - terminate()
312
+ - kill()
313
+
314
+ To prevent this problem for all other methods you can use
315
+ is_running() before querying the process.
316
+ """
317
+
318
+ def __init__(self, pid=None):
319
+ self._init(pid)
320
+
321
+ def _init(self, pid, _ignore_nsp=False):
322
+ if pid is None:
323
+ pid = os.getpid()
324
+ else:
325
+ if not _PY3 and not isinstance(pid, (int, long)):
326
+ msg = "pid must be an integer (got %r)" % pid
327
+ raise TypeError(msg)
328
+ if pid < 0:
329
+ msg = "pid must be a positive integer (got %s)" % pid
330
+ raise ValueError(msg)
331
+ try:
332
+ _psplatform.cext.check_pid_range(pid)
333
+ except OverflowError:
334
+ msg = "process PID out of range (got %s)" % pid
335
+ raise NoSuchProcess(pid, msg=msg)
336
+
337
+ self._pid = pid
338
+ self._name = None
339
+ self._exe = None
340
+ self._create_time = None
341
+ self._gone = False
342
+ self._pid_reused = False
343
+ self._hash = None
344
+ self._lock = threading.RLock()
345
+ # used for caching on Windows only (on POSIX ppid may change)
346
+ self._ppid = None
347
+ # platform-specific modules define an _psplatform.Process
348
+ # implementation class
349
+ self._proc = _psplatform.Process(pid)
350
+ self._last_sys_cpu_times = None
351
+ self._last_proc_cpu_times = None
352
+ self._exitcode = _SENTINEL
353
+ self._ident = (self.pid, None)
354
+ try:
355
+ self._ident = self._get_ident()
356
+ except AccessDenied:
357
+ # This should happen on Windows only, since we use the fast
358
+ # create time method. AFAIK, on all other platforms we are
359
+ # able to get create time for all PIDs.
360
+ pass
361
+ except ZombieProcess:
362
+ # Zombies can still be queried by this class (although
363
+ # not always) and pids() return them so just go on.
364
+ pass
365
+ except NoSuchProcess:
366
+ if not _ignore_nsp:
367
+ msg = "process PID not found"
368
+ raise NoSuchProcess(pid, msg=msg)
369
+ else:
370
+ self._gone = True
371
+
372
+ def _get_ident(self):
373
+ """Return a (pid, uid) tuple which is supposed to identify a
374
+ Process instance univocally over time. The PID alone is not
375
+ enough, as it can be assigned to a new process after this one
376
+ terminates, so we add process creation time to the mix. We need
377
+ this in order to prevent killing the wrong process later on.
378
+ This is also known as PID reuse or PID recycling problem.
379
+
380
+ The reliability of this strategy mostly depends on
381
+ create_time() precision, which is 0.01 secs on Linux. The
382
+ assumption is that, after a process terminates, the kernel
383
+ won't reuse the same PID after such a short period of time
384
+ (0.01 secs). Technically this is inherently racy, but
385
+ practically it should be good enough.
386
+ """
387
+ if WINDOWS:
388
+ # Use create_time() fast method in order to speedup
389
+ # `process_iter()`. This means we'll get AccessDenied for
390
+ # most ADMIN processes, but that's fine since it means
391
+ # we'll also get AccessDenied on kill().
392
+ # https://github.com/giampaolo/psutil/issues/2366#issuecomment-2381646555
393
+ self._create_time = self._proc.create_time(fast_only=True)
394
+ return (self.pid, self._create_time)
395
+ else:
396
+ return (self.pid, self.create_time())
397
+
398
+ def __str__(self):
399
+ info = collections.OrderedDict()
400
+ info["pid"] = self.pid
401
+ if self._name:
402
+ info['name'] = self._name
403
+ with self.oneshot():
404
+ if self._pid_reused:
405
+ info["status"] = "terminated + PID reused"
406
+ else:
407
+ try:
408
+ info["name"] = self.name()
409
+ info["status"] = self.status()
410
+ except ZombieProcess:
411
+ info["status"] = "zombie"
412
+ except NoSuchProcess:
413
+ info["status"] = "terminated"
414
+ except AccessDenied:
415
+ pass
416
+
417
+ if self._exitcode not in {_SENTINEL, None}:
418
+ info["exitcode"] = self._exitcode
419
+ if self._create_time is not None:
420
+ info['started'] = _pprint_secs(self._create_time)
421
+
422
+ return "%s.%s(%s)" % (
423
+ self.__class__.__module__,
424
+ self.__class__.__name__,
425
+ ", ".join(["%s=%r" % (k, v) for k, v in info.items()]),
426
+ )
427
+
428
+ __repr__ = __str__
429
+
430
+ def __eq__(self, other):
431
+ # Test for equality with another Process object based
432
+ # on PID and creation time.
433
+ if not isinstance(other, Process):
434
+ return NotImplemented
435
+ if OPENBSD or NETBSD: # pragma: no cover
436
+ # Zombie processes on Open/NetBSD have a creation time of
437
+ # 0.0. This covers the case when a process started normally
438
+ # (so it has a ctime), then it turned into a zombie. It's
439
+ # important to do this because is_running() depends on
440
+ # __eq__.
441
+ pid1, ident1 = self._ident
442
+ pid2, ident2 = other._ident
443
+ if pid1 == pid2:
444
+ if ident1 and not ident2:
445
+ try:
446
+ return self.status() == STATUS_ZOMBIE
447
+ except Error:
448
+ pass
449
+ return self._ident == other._ident
450
+
451
+ def __ne__(self, other):
452
+ return not self == other
453
+
454
+ def __hash__(self):
455
+ if self._hash is None:
456
+ self._hash = hash(self._ident)
457
+ return self._hash
458
+
459
+ def _raise_if_pid_reused(self):
460
+ """Raises NoSuchProcess in case process PID has been reused."""
461
+ if self._pid_reused or (not self.is_running() and self._pid_reused):
462
+ # We may directly raise NSP in here already if PID is just
463
+ # not running, but I prefer NSP to be raised naturally by
464
+ # the actual Process API call. This way unit tests will tell
465
+ # us if the API is broken (aka don't raise NSP when it
466
+ # should). We also remain consistent with all other "get"
467
+ # APIs which don't use _raise_if_pid_reused().
468
+ msg = "process no longer exists and its PID has been reused"
469
+ raise NoSuchProcess(self.pid, self._name, msg=msg)
470
+
471
+ @property
472
+ def pid(self):
473
+ """The process PID."""
474
+ return self._pid
475
+
476
+ # --- utility methods
477
+
478
+ @contextlib.contextmanager
479
+ def oneshot(self):
480
+ """Utility context manager which considerably speeds up the
481
+ retrieval of multiple process information at the same time.
482
+
483
+ Internally different process info (e.g. name, ppid, uids,
484
+ gids, ...) may be fetched by using the same routine, but
485
+ only one information is returned and the others are discarded.
486
+ When using this context manager the internal routine is
487
+ executed once (in the example below on name()) and the
488
+ other info are cached.
489
+
490
+ The cache is cleared when exiting the context manager block.
491
+ The advice is to use this every time you retrieve more than
492
+ one information about the process. If you're lucky, you'll
493
+ get a hell of a speedup.
494
+
495
+ >>> import psutil
496
+ >>> p = psutil.Process()
497
+ >>> with p.oneshot():
498
+ ... p.name() # collect multiple info
499
+ ... p.cpu_times() # return cached value
500
+ ... p.cpu_percent() # return cached value
501
+ ... p.create_time() # return cached value
502
+ ...
503
+ >>>
504
+ """
505
+ with self._lock:
506
+ if hasattr(self, "_cache"):
507
+ # NOOP: this covers the use case where the user enters the
508
+ # context twice:
509
+ #
510
+ # >>> with p.oneshot():
511
+ # ... with p.oneshot():
512
+ # ...
513
+ #
514
+ # Also, since as_dict() internally uses oneshot()
515
+ # I expect that the code below will be a pretty common
516
+ # "mistake" that the user will make, so let's guard
517
+ # against that:
518
+ #
519
+ # >>> with p.oneshot():
520
+ # ... p.as_dict()
521
+ # ...
522
+ yield
523
+ else:
524
+ try:
525
+ # cached in case cpu_percent() is used
526
+ self.cpu_times.cache_activate(self)
527
+ # cached in case memory_percent() is used
528
+ self.memory_info.cache_activate(self)
529
+ # cached in case parent() is used
530
+ self.ppid.cache_activate(self)
531
+ # cached in case username() is used
532
+ if POSIX:
533
+ self.uids.cache_activate(self)
534
+ # specific implementation cache
535
+ self._proc.oneshot_enter()
536
+ yield
537
+ finally:
538
+ self.cpu_times.cache_deactivate(self)
539
+ self.memory_info.cache_deactivate(self)
540
+ self.ppid.cache_deactivate(self)
541
+ if POSIX:
542
+ self.uids.cache_deactivate(self)
543
+ self._proc.oneshot_exit()
544
+
545
+ def as_dict(self, attrs=None, ad_value=None):
546
+ """Utility method returning process information as a
547
+ hashable dictionary.
548
+ If *attrs* is specified it must be a list of strings
549
+ reflecting available Process class' attribute names
550
+ (e.g. ['cpu_times', 'name']) else all public (read
551
+ only) attributes are assumed.
552
+ *ad_value* is the value which gets assigned in case
553
+ AccessDenied or ZombieProcess exception is raised when
554
+ retrieving that particular process information.
555
+ """
556
+ valid_names = _as_dict_attrnames
557
+ if attrs is not None:
558
+ if not isinstance(attrs, (list, tuple, set, frozenset)):
559
+ msg = "invalid attrs type %s" % type(attrs)
560
+ raise TypeError(msg)
561
+ attrs = set(attrs)
562
+ invalid_names = attrs - valid_names
563
+ if invalid_names:
564
+ msg = "invalid attr name%s %s" % (
565
+ "s" if len(invalid_names) > 1 else "",
566
+ ", ".join(map(repr, invalid_names)),
567
+ )
568
+ raise ValueError(msg)
569
+
570
+ retdict = {}
571
+ ls = attrs or valid_names
572
+ with self.oneshot():
573
+ for name in ls:
574
+ try:
575
+ if name == 'pid':
576
+ ret = self.pid
577
+ else:
578
+ meth = getattr(self, name)
579
+ ret = meth()
580
+ except (AccessDenied, ZombieProcess):
581
+ ret = ad_value
582
+ except NotImplementedError:
583
+ # in case of not implemented functionality (may happen
584
+ # on old or exotic systems) we want to crash only if
585
+ # the user explicitly asked for that particular attr
586
+ if attrs:
587
+ raise
588
+ continue
589
+ retdict[name] = ret
590
+ return retdict
591
+
592
+ def parent(self):
593
+ """Return the parent process as a Process object pre-emptively
594
+ checking whether PID has been reused.
595
+ If no parent is known return None.
596
+ """
597
+ lowest_pid = _LOWEST_PID if _LOWEST_PID is not None else pids()[0]
598
+ if self.pid == lowest_pid:
599
+ return None
600
+ ppid = self.ppid()
601
+ if ppid is not None:
602
+ ctime = self.create_time()
603
+ try:
604
+ parent = Process(ppid)
605
+ if parent.create_time() <= ctime:
606
+ return parent
607
+ # ...else ppid has been reused by another process
608
+ except NoSuchProcess:
609
+ pass
610
+
611
+ def parents(self):
612
+ """Return the parents of this process as a list of Process
613
+ instances. If no parents are known return an empty list.
614
+ """
615
+ parents = []
616
+ proc = self.parent()
617
+ while proc is not None:
618
+ parents.append(proc)
619
+ proc = proc.parent()
620
+ return parents
621
+
622
+ def is_running(self):
623
+ """Return whether this process is running.
624
+
625
+ It also checks if PID has been reused by another process, in
626
+ which case it will remove the process from `process_iter()`
627
+ internal cache and return False.
628
+ """
629
+ if self._gone or self._pid_reused:
630
+ return False
631
+ try:
632
+ # Checking if PID is alive is not enough as the PID might
633
+ # have been reused by another process. Process identity /
634
+ # uniqueness over time is guaranteed by (PID + creation
635
+ # time) and that is verified in __eq__.
636
+ self._pid_reused = self != Process(self.pid)
637
+ if self._pid_reused:
638
+ _pids_reused.add(self.pid)
639
+ raise NoSuchProcess(self.pid)
640
+ return True
641
+ except ZombieProcess:
642
+ # We should never get here as it's already handled in
643
+ # Process.__init__; here just for extra safety.
644
+ return True
645
+ except NoSuchProcess:
646
+ self._gone = True
647
+ return False
648
+
649
+ # --- actual API
650
+
651
+ @memoize_when_activated
652
+ def ppid(self):
653
+ """The process parent PID.
654
+ On Windows the return value is cached after first call.
655
+ """
656
+ # On POSIX we don't want to cache the ppid as it may unexpectedly
657
+ # change to 1 (init) in case this process turns into a zombie:
658
+ # https://github.com/giampaolo/psutil/issues/321
659
+ # http://stackoverflow.com/questions/356722/
660
+
661
+ # XXX should we check creation time here rather than in
662
+ # Process.parent()?
663
+ self._raise_if_pid_reused()
664
+ if POSIX:
665
+ return self._proc.ppid()
666
+ else: # pragma: no cover
667
+ self._ppid = self._ppid or self._proc.ppid()
668
+ return self._ppid
669
+
670
+ def name(self):
671
+ """The process name. The return value is cached after first call."""
672
+ # Process name is only cached on Windows as on POSIX it may
673
+ # change, see:
674
+ # https://github.com/giampaolo/psutil/issues/692
675
+ if WINDOWS and self._name is not None:
676
+ return self._name
677
+ name = self._proc.name()
678
+ if POSIX and len(name) >= 15:
679
+ # On UNIX the name gets truncated to the first 15 characters.
680
+ # If it matches the first part of the cmdline we return that
681
+ # one instead because it's usually more explicative.
682
+ # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
683
+ try:
684
+ cmdline = self.cmdline()
685
+ except (AccessDenied, ZombieProcess):
686
+ # Just pass and return the truncated name: it's better
687
+ # than nothing. Note: there are actual cases where a
688
+ # zombie process can return a name() but not a
689
+ # cmdline(), see:
690
+ # https://github.com/giampaolo/psutil/issues/2239
691
+ pass
692
+ else:
693
+ if cmdline:
694
+ extended_name = os.path.basename(cmdline[0])
695
+ if extended_name.startswith(name):
696
+ name = extended_name
697
+ self._name = name
698
+ self._proc._name = name
699
+ return name
700
+
701
+ def exe(self):
702
+ """The process executable as an absolute path.
703
+ May also be an empty string.
704
+ The return value is cached after first call.
705
+ """
706
+
707
+ def guess_it(fallback):
708
+ # try to guess exe from cmdline[0] in absence of a native
709
+ # exe representation
710
+ cmdline = self.cmdline()
711
+ if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
712
+ exe = cmdline[0] # the possible exe
713
+ # Attempt to guess only in case of an absolute path.
714
+ # It is not safe otherwise as the process might have
715
+ # changed cwd.
716
+ if (
717
+ os.path.isabs(exe)
718
+ and os.path.isfile(exe)
719
+ and os.access(exe, os.X_OK)
720
+ ):
721
+ return exe
722
+ if isinstance(fallback, AccessDenied):
723
+ raise fallback
724
+ return fallback
725
+
726
+ if self._exe is None:
727
+ try:
728
+ exe = self._proc.exe()
729
+ except AccessDenied as err:
730
+ return guess_it(fallback=err)
731
+ else:
732
+ if not exe:
733
+ # underlying implementation can legitimately return an
734
+ # empty string; if that's the case we don't want to
735
+ # raise AD while guessing from the cmdline
736
+ try:
737
+ exe = guess_it(fallback=exe)
738
+ except AccessDenied:
739
+ pass
740
+ self._exe = exe
741
+ return self._exe
742
+
743
+ def cmdline(self):
744
+ """The command line this process has been called with."""
745
+ return self._proc.cmdline()
746
+
747
+ def status(self):
748
+ """The process current status as a STATUS_* constant."""
749
+ try:
750
+ return self._proc.status()
751
+ except ZombieProcess:
752
+ return STATUS_ZOMBIE
753
+
754
+ def username(self):
755
+ """The name of the user that owns the process.
756
+ On UNIX this is calculated by using *real* process uid.
757
+ """
758
+ if POSIX:
759
+ if pwd is None:
760
+ # might happen if python was installed from sources
761
+ msg = "requires pwd module shipped with standard python"
762
+ raise ImportError(msg)
763
+ real_uid = self.uids().real
764
+ try:
765
+ return pwd.getpwuid(real_uid).pw_name
766
+ except KeyError:
767
+ # the uid can't be resolved by the system
768
+ return str(real_uid)
769
+ else:
770
+ return self._proc.username()
771
+
772
+ def create_time(self):
773
+ """The process creation time as a floating point number
774
+ expressed in seconds since the epoch.
775
+ The return value is cached after first call.
776
+ """
777
+ if self._create_time is None:
778
+ self._create_time = self._proc.create_time()
779
+ return self._create_time
780
+
781
+ def cwd(self):
782
+ """Process current working directory as an absolute path."""
783
+ return self._proc.cwd()
784
+
785
+ def nice(self, value=None):
786
+ """Get or set process niceness (priority)."""
787
+ if value is None:
788
+ return self._proc.nice_get()
789
+ else:
790
+ self._raise_if_pid_reused()
791
+ self._proc.nice_set(value)
792
+
793
+ if POSIX:
794
+
795
+ @memoize_when_activated
796
+ def uids(self):
797
+ """Return process UIDs as a (real, effective, saved)
798
+ namedtuple.
799
+ """
800
+ return self._proc.uids()
801
+
802
+ def gids(self):
803
+ """Return process GIDs as a (real, effective, saved)
804
+ namedtuple.
805
+ """
806
+ return self._proc.gids()
807
+
808
+ def terminal(self):
809
+ """The terminal associated with this process, if any,
810
+ else None.
811
+ """
812
+ return self._proc.terminal()
813
+
814
+ def num_fds(self):
815
+ """Return the number of file descriptors opened by this
816
+ process (POSIX only).
817
+ """
818
+ return self._proc.num_fds()
819
+
820
+ # Linux, BSD, AIX and Windows only
821
+ if hasattr(_psplatform.Process, "io_counters"):
822
+
823
+ def io_counters(self):
824
+ """Return process I/O statistics as a
825
+ (read_count, write_count, read_bytes, write_bytes)
826
+ namedtuple.
827
+ Those are the number of read/write calls performed and the
828
+ amount of bytes read and written by the process.
829
+ """
830
+ return self._proc.io_counters()
831
+
832
+ # Linux and Windows
833
+ if hasattr(_psplatform.Process, "ionice_get"):
834
+
835
+ def ionice(self, ioclass=None, value=None):
836
+ """Get or set process I/O niceness (priority).
837
+
838
+ On Linux *ioclass* is one of the IOPRIO_CLASS_* constants.
839
+ *value* is a number which goes from 0 to 7. The higher the
840
+ value, the lower the I/O priority of the process.
841
+
842
+ On Windows only *ioclass* is used and it can be set to 2
843
+ (normal), 1 (low) or 0 (very low).
844
+
845
+ Available on Linux and Windows > Vista only.
846
+ """
847
+ if ioclass is None:
848
+ if value is not None:
849
+ msg = "'ioclass' argument must be specified"
850
+ raise ValueError(msg)
851
+ return self._proc.ionice_get()
852
+ else:
853
+ self._raise_if_pid_reused()
854
+ return self._proc.ionice_set(ioclass, value)
855
+
856
+ # Linux / FreeBSD only
857
+ if hasattr(_psplatform.Process, "rlimit"):
858
+
859
+ def rlimit(self, resource, limits=None):
860
+ """Get or set process resource limits as a (soft, hard)
861
+ tuple.
862
+
863
+ *resource* is one of the RLIMIT_* constants.
864
+ *limits* is supposed to be a (soft, hard) tuple.
865
+
866
+ See "man prlimit" for further info.
867
+ Available on Linux and FreeBSD only.
868
+ """
869
+ if limits is not None:
870
+ self._raise_if_pid_reused()
871
+ return self._proc.rlimit(resource, limits)
872
+
873
+ # Windows, Linux and FreeBSD only
874
+ if hasattr(_psplatform.Process, "cpu_affinity_get"):
875
+
876
+ def cpu_affinity(self, cpus=None):
877
+ """Get or set process CPU affinity.
878
+ If specified, *cpus* must be a list of CPUs for which you
879
+ want to set the affinity (e.g. [0, 1]).
880
+ If an empty list is passed, all egible CPUs are assumed
881
+ (and set).
882
+ (Windows, Linux and BSD only).
883
+ """
884
+ if cpus is None:
885
+ return sorted(set(self._proc.cpu_affinity_get()))
886
+ else:
887
+ self._raise_if_pid_reused()
888
+ if not cpus:
889
+ if hasattr(self._proc, "_get_eligible_cpus"):
890
+ cpus = self._proc._get_eligible_cpus()
891
+ else:
892
+ cpus = tuple(range(len(cpu_times(percpu=True))))
893
+ self._proc.cpu_affinity_set(list(set(cpus)))
894
+
895
+ # Linux, FreeBSD, SunOS
896
+ if hasattr(_psplatform.Process, "cpu_num"):
897
+
898
+ def cpu_num(self):
899
+ """Return what CPU this process is currently running on.
900
+ The returned number should be <= psutil.cpu_count()
901
+ and <= len(psutil.cpu_percent(percpu=True)).
902
+ It may be used in conjunction with
903
+ psutil.cpu_percent(percpu=True) to observe the system
904
+ workload distributed across CPUs.
905
+ """
906
+ return self._proc.cpu_num()
907
+
908
+ # All platforms has it, but maybe not in the future.
909
+ if hasattr(_psplatform.Process, "environ"):
910
+
911
+ def environ(self):
912
+ """The environment variables of the process as a dict. Note: this
913
+ might not reflect changes made after the process started.
914
+ """
915
+ return self._proc.environ()
916
+
917
+ if WINDOWS:
918
+
919
+ def num_handles(self):
920
+ """Return the number of handles opened by this process
921
+ (Windows only).
922
+ """
923
+ return self._proc.num_handles()
924
+
925
+ def num_ctx_switches(self):
926
+ """Return the number of voluntary and involuntary context
927
+ switches performed by this process.
928
+ """
929
+ return self._proc.num_ctx_switches()
930
+
931
+ def num_threads(self):
932
+ """Return the number of threads used by this process."""
933
+ return self._proc.num_threads()
934
+
935
+ if hasattr(_psplatform.Process, "threads"):
936
+
937
+ def threads(self):
938
+ """Return threads opened by process as a list of
939
+ (id, user_time, system_time) namedtuples representing
940
+ thread id and thread CPU times (user/system).
941
+ On OpenBSD this method requires root access.
942
+ """
943
+ return self._proc.threads()
944
+
945
+ def children(self, recursive=False):
946
+ """Return the children of this process as a list of Process
947
+ instances, pre-emptively checking whether PID has been reused.
948
+ If *recursive* is True return all the parent descendants.
949
+
950
+ Example (A == this process):
951
+
952
+ A ─┐
953
+
954
+ ├─ B (child) ─┐
955
+ │ └─ X (grandchild) ─┐
956
+ │ └─ Y (great grandchild)
957
+ ├─ C (child)
958
+ └─ D (child)
959
+
960
+ >>> import psutil
961
+ >>> p = psutil.Process()
962
+ >>> p.children()
963
+ B, C, D
964
+ >>> p.children(recursive=True)
965
+ B, X, Y, C, D
966
+
967
+ Note that in the example above if process X disappears
968
+ process Y won't be listed as the reference to process A
969
+ is lost.
970
+ """
971
+ self._raise_if_pid_reused()
972
+ ppid_map = _ppid_map()
973
+ ret = []
974
+ if not recursive:
975
+ for pid, ppid in ppid_map.items():
976
+ if ppid == self.pid:
977
+ try:
978
+ child = Process(pid)
979
+ # if child happens to be older than its parent
980
+ # (self) it means child's PID has been reused
981
+ if self.create_time() <= child.create_time():
982
+ ret.append(child)
983
+ except (NoSuchProcess, ZombieProcess):
984
+ pass
985
+ else:
986
+ # Construct a {pid: [child pids]} dict
987
+ reverse_ppid_map = collections.defaultdict(list)
988
+ for pid, ppid in ppid_map.items():
989
+ reverse_ppid_map[ppid].append(pid)
990
+ # Recursively traverse that dict, starting from self.pid,
991
+ # such that we only call Process() on actual children
992
+ seen = set()
993
+ stack = [self.pid]
994
+ while stack:
995
+ pid = stack.pop()
996
+ if pid in seen:
997
+ # Since pids can be reused while the ppid_map is
998
+ # constructed, there may be rare instances where
999
+ # there's a cycle in the recorded process "tree".
1000
+ continue
1001
+ seen.add(pid)
1002
+ for child_pid in reverse_ppid_map[pid]:
1003
+ try:
1004
+ child = Process(child_pid)
1005
+ # if child happens to be older than its parent
1006
+ # (self) it means child's PID has been reused
1007
+ intime = self.create_time() <= child.create_time()
1008
+ if intime:
1009
+ ret.append(child)
1010
+ stack.append(child_pid)
1011
+ except (NoSuchProcess, ZombieProcess):
1012
+ pass
1013
+ return ret
1014
+
1015
+ def cpu_percent(self, interval=None):
1016
+ """Return a float representing the current process CPU
1017
+ utilization as a percentage.
1018
+
1019
+ When *interval* is 0.0 or None (default) compares process times
1020
+ to system CPU times elapsed since last call, returning
1021
+ immediately (non-blocking). That means that the first time
1022
+ this is called it will return a meaningful 0.0 value.
1023
+
1024
+ When *interval* is > 0.0 compares process times to system CPU
1025
+ times elapsed before and after the interval (blocking).
1026
+
1027
+ In this case is recommended for accuracy that this function
1028
+ be called with at least 0.1 seconds between calls.
1029
+
1030
+ A value > 100.0 can be returned in case of processes running
1031
+ multiple threads on different CPU cores.
1032
+
1033
+ The returned value is explicitly NOT split evenly between
1034
+ all available logical CPUs. This means that a busy loop process
1035
+ running on a system with 2 logical CPUs will be reported as
1036
+ having 100% CPU utilization instead of 50%.
1037
+
1038
+ Examples:
1039
+
1040
+ >>> import psutil
1041
+ >>> p = psutil.Process(os.getpid())
1042
+ >>> # blocking
1043
+ >>> p.cpu_percent(interval=1)
1044
+ 2.0
1045
+ >>> # non-blocking (percentage since last call)
1046
+ >>> p.cpu_percent(interval=None)
1047
+ 2.9
1048
+ >>>
1049
+ """
1050
+ blocking = interval is not None and interval > 0.0
1051
+ if interval is not None and interval < 0:
1052
+ msg = "interval is not positive (got %r)" % interval
1053
+ raise ValueError(msg)
1054
+ num_cpus = cpu_count() or 1
1055
+
1056
+ def timer():
1057
+ return _timer() * num_cpus
1058
+
1059
+ if blocking:
1060
+ st1 = timer()
1061
+ pt1 = self._proc.cpu_times()
1062
+ time.sleep(interval)
1063
+ st2 = timer()
1064
+ pt2 = self._proc.cpu_times()
1065
+ else:
1066
+ st1 = self._last_sys_cpu_times
1067
+ pt1 = self._last_proc_cpu_times
1068
+ st2 = timer()
1069
+ pt2 = self._proc.cpu_times()
1070
+ if st1 is None or pt1 is None:
1071
+ self._last_sys_cpu_times = st2
1072
+ self._last_proc_cpu_times = pt2
1073
+ return 0.0
1074
+
1075
+ delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
1076
+ delta_time = st2 - st1
1077
+ # reset values for next call in case of interval == None
1078
+ self._last_sys_cpu_times = st2
1079
+ self._last_proc_cpu_times = pt2
1080
+
1081
+ try:
1082
+ # This is the utilization split evenly between all CPUs.
1083
+ # E.g. a busy loop process on a 2-CPU-cores system at this
1084
+ # point is reported as 50% instead of 100%.
1085
+ overall_cpus_percent = (delta_proc / delta_time) * 100
1086
+ except ZeroDivisionError:
1087
+ # interval was too low
1088
+ return 0.0
1089
+ else:
1090
+ # Note 1:
1091
+ # in order to emulate "top" we multiply the value for the num
1092
+ # of CPU cores. This way the busy process will be reported as
1093
+ # having 100% (or more) usage.
1094
+ #
1095
+ # Note 2:
1096
+ # taskmgr.exe on Windows differs in that it will show 50%
1097
+ # instead.
1098
+ #
1099
+ # Note 3:
1100
+ # a percentage > 100 is legitimate as it can result from a
1101
+ # process with multiple threads running on different CPU
1102
+ # cores (top does the same), see:
1103
+ # http://stackoverflow.com/questions/1032357
1104
+ # https://github.com/giampaolo/psutil/issues/474
1105
+ single_cpu_percent = overall_cpus_percent * num_cpus
1106
+ return round(single_cpu_percent, 1)
1107
+
1108
+ @memoize_when_activated
1109
+ def cpu_times(self):
1110
+ """Return a (user, system, children_user, children_system)
1111
+ namedtuple representing the accumulated process time, in
1112
+ seconds.
1113
+ This is similar to os.times() but per-process.
1114
+ On macOS and Windows children_user and children_system are
1115
+ always set to 0.
1116
+ """
1117
+ return self._proc.cpu_times()
1118
+
1119
+ @memoize_when_activated
1120
+ def memory_info(self):
1121
+ """Return a namedtuple with variable fields depending on the
1122
+ platform, representing memory information about the process.
1123
+
1124
+ The "portable" fields available on all platforms are `rss` and `vms`.
1125
+
1126
+ All numbers are expressed in bytes.
1127
+ """
1128
+ return self._proc.memory_info()
1129
+
1130
+ @_common.deprecated_method(replacement="memory_info")
1131
+ def memory_info_ex(self):
1132
+ return self.memory_info()
1133
+
1134
+ def memory_full_info(self):
1135
+ """This method returns the same information as memory_info(),
1136
+ plus, on some platform (Linux, macOS, Windows), also provides
1137
+ additional metrics (USS, PSS and swap).
1138
+ The additional metrics provide a better representation of actual
1139
+ process memory usage.
1140
+
1141
+ Namely USS is the memory which is unique to a process and which
1142
+ would be freed if the process was terminated right now.
1143
+
1144
+ It does so by passing through the whole process address.
1145
+ As such it usually requires higher user privileges than
1146
+ memory_info() and is considerably slower.
1147
+ """
1148
+ return self._proc.memory_full_info()
1149
+
1150
+ def memory_percent(self, memtype="rss"):
1151
+ """Compare process memory to total physical system memory and
1152
+ calculate process memory utilization as a percentage.
1153
+ *memtype* argument is a string that dictates what type of
1154
+ process memory you want to compare against (defaults to "rss").
1155
+ The list of available strings can be obtained like this:
1156
+
1157
+ >>> psutil.Process().memory_info()._fields
1158
+ ('rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty', 'uss', 'pss')
1159
+ """
1160
+ valid_types = list(_psplatform.pfullmem._fields)
1161
+ if memtype not in valid_types:
1162
+ msg = "invalid memtype %r; valid types are %r" % (
1163
+ memtype,
1164
+ tuple(valid_types),
1165
+ )
1166
+ raise ValueError(msg)
1167
+ fun = (
1168
+ self.memory_info
1169
+ if memtype in _psplatform.pmem._fields
1170
+ else self.memory_full_info
1171
+ )
1172
+ metrics = fun()
1173
+ value = getattr(metrics, memtype)
1174
+
1175
+ # use cached value if available
1176
+ total_phymem = _TOTAL_PHYMEM or virtual_memory().total
1177
+ if not total_phymem > 0:
1178
+ # we should never get here
1179
+ msg = (
1180
+ "can't calculate process memory percent because total physical"
1181
+ " system memory is not positive (%r)" % (total_phymem)
1182
+ )
1183
+ raise ValueError(msg)
1184
+ return (value / float(total_phymem)) * 100
1185
+
1186
    if hasattr(_psplatform.Process, "memory_maps"):

        def memory_maps(self, grouped=True):
            """Return process' mapped memory regions as a list of namedtuples
            whose fields are variable depending on the platform.

            If *grouped* is True the mapped regions with the same 'path'
            are grouped together and the different memory fields are summed.

            If *grouped* is False every mapped region is shown as a single
            entity and the namedtuple will also include the mapped region's
            address space ('addr') and permission set ('perms').
            """
            it = self._proc.memory_maps()
            if grouped:
                d = {}
                for tupl in it:
                    path = tupl[2]
                    nums = tupl[3:]
                    try:
                        # Element-wise sum with the previously seen values
                        # for this path; map() is lazy, but it is consumed
                        # below when the namedtuple is built.
                        d[path] = map(lambda x, y: x + y, d[path], nums)
                    except KeyError:
                        # First occurrence of this path.
                        d[path] = nums
                nt = _psplatform.pmmap_grouped
                return [nt(path, *d[path]) for path in d]  # NOQA
            else:
                nt = _psplatform.pmmap_ext
                return [nt(*x) for x in it]
1214
+
1215
    def open_files(self):
        """Return files opened by process as a list of
        (path, fd) namedtuples including the absolute file name
        and file descriptor number.
        """
        # Delegated entirely to the platform-specific implementation.
        return self._proc.open_files()
1221
+
1222
    def net_connections(self, kind='inet'):
        """Return socket connections opened by process as a list of
        (fd, family, type, laddr, raddr, status) namedtuples.
        The *kind* parameter filters for connections that match the
        following criteria:

        +------------+----------------------------------------------------+
        | Kind Value | Connections using                                  |
        +------------+----------------------------------------------------+
        | inet       | IPv4 and IPv6                                      |
        | inet4      | IPv4                                               |
        | inet6      | IPv6                                               |
        | tcp        | TCP                                                |
        | tcp4       | TCP over IPv4                                      |
        | tcp6       | TCP over IPv6                                      |
        | udp        | UDP                                                |
        | udp4       | UDP over IPv4                                      |
        | udp6       | UDP over IPv6                                      |
        | unix       | UNIX socket (both UDP and TCP protocols)           |
        | all        | the sum of all the possible families and protocols |
        +------------+----------------------------------------------------+
        """
        # Filtering by *kind* is performed by the platform implementation.
        return self._proc.net_connections(kind)
1245
+
1246
    @_common.deprecated_method(replacement="net_connections")
    def connections(self, kind="inet"):
        # Deprecated alias; the decorator emits a deprecation warning
        # and we simply forward to net_connections().
        return self.net_connections(kind=kind)
1249
+
1250
+ # --- signals
1251
+
1252
    if POSIX:

        def _send_signal(self, sig):
            """POSIX-only helper: deliver signal *sig* via os.kill(),
            translating OS-level errors into psutil exceptions.
            """
            assert not self.pid < 0, self.pid
            self._raise_if_pid_reused()
            if self.pid == 0:
                # see "man 2 kill"
                msg = (
                    "preventing sending signal to process with PID 0 as it "
                    "would affect every process in the process group of the "
                    "calling process (os.getpid()) instead of PID 0"
                )
                raise ValueError(msg)
            try:
                os.kill(self.pid, sig)
            except ProcessLookupError:
                if OPENBSD and pid_exists(self.pid):
                    # We do this because os.kill() lies in case of
                    # zombie processes.
                    raise ZombieProcess(self.pid, self._name, self._ppid)
                else:
                    # Remember the process is gone so later calls can
                    # short-circuit.
                    self._gone = True
                    raise NoSuchProcess(self.pid, self._name)
            except PermissionError:
                raise AccessDenied(self.pid, self._name)
1277
+
1278
+ def send_signal(self, sig):
1279
+ """Send a signal *sig* to process pre-emptively checking
1280
+ whether PID has been reused (see signal module constants) .
1281
+ On Windows only SIGTERM is valid and is treated as an alias
1282
+ for kill().
1283
+ """
1284
+ if POSIX:
1285
+ self._send_signal(sig)
1286
+ else: # pragma: no cover
1287
+ self._raise_if_pid_reused()
1288
+ if sig != signal.SIGTERM and not self.is_running():
1289
+ msg = "process no longer exists"
1290
+ raise NoSuchProcess(self.pid, self._name, msg=msg)
1291
+ self._proc.send_signal(sig)
1292
+
1293
+ def suspend(self):
1294
+ """Suspend process execution with SIGSTOP pre-emptively checking
1295
+ whether PID has been reused.
1296
+ On Windows this has the effect of suspending all process threads.
1297
+ """
1298
+ if POSIX:
1299
+ self._send_signal(signal.SIGSTOP)
1300
+ else: # pragma: no cover
1301
+ self._raise_if_pid_reused()
1302
+ self._proc.suspend()
1303
+
1304
+ def resume(self):
1305
+ """Resume process execution with SIGCONT pre-emptively checking
1306
+ whether PID has been reused.
1307
+ On Windows this has the effect of resuming all process threads.
1308
+ """
1309
+ if POSIX:
1310
+ self._send_signal(signal.SIGCONT)
1311
+ else: # pragma: no cover
1312
+ self._raise_if_pid_reused()
1313
+ self._proc.resume()
1314
+
1315
+ def terminate(self):
1316
+ """Terminate the process with SIGTERM pre-emptively checking
1317
+ whether PID has been reused.
1318
+ On Windows this is an alias for kill().
1319
+ """
1320
+ if POSIX:
1321
+ self._send_signal(signal.SIGTERM)
1322
+ else: # pragma: no cover
1323
+ self._raise_if_pid_reused()
1324
+ self._proc.kill()
1325
+
1326
+ def kill(self):
1327
+ """Kill the current process with SIGKILL pre-emptively checking
1328
+ whether PID has been reused.
1329
+ """
1330
+ if POSIX:
1331
+ self._send_signal(signal.SIGKILL)
1332
+ else: # pragma: no cover
1333
+ self._raise_if_pid_reused()
1334
+ self._proc.kill()
1335
+
1336
    def wait(self, timeout=None):
        """Wait for process to terminate and, if process is a child
        of os.getpid(), also return its exit code, else None.
        On Windows there's no such limitation (exit code is always
        returned).

        If the process is already terminated immediately return None
        instead of raising NoSuchProcess.

        If *timeout* (in seconds) is specified and process is still
        alive raise TimeoutExpired.

        To wait for multiple Process(es) use psutil.wait_procs().
        """
        if timeout is not None and not timeout >= 0:
            msg = "timeout must be a positive integer"
            raise ValueError(msg)
        if self._exitcode is not _SENTINEL:
            # We already waited on this process: return the cached exit
            # code instead of waiting again.
            return self._exitcode
        self._exitcode = self._proc.wait(timeout)
        return self._exitcode
1357
+
1358
+
1359
# The valid attr names which can be processed by Process.as_dict().
# Actions (signals, wait), structural helpers and deprecated aliases
# are excluded because they are not process "info".
# Idiom fix: use a set comprehension instead of set([list-comp]).
_as_dict_attrnames = {
    x
    for x in dir(Process)
    if not x.startswith('_')
    and x not in {
        'send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
        'is_running', 'as_dict', 'parent', 'parents', 'children', 'rlimit',
        'memory_info_ex', 'connections', 'oneshot',
    }
}
1367
+
1368
+
1369
+ # =====================================================================
1370
+ # --- Popen class
1371
+ # =====================================================================
1372
+
1373
+
1374
class Popen(Process):
    """Same as subprocess.Popen, but in addition it provides all
    psutil.Process methods in a single class.
    For the following methods which are common to both classes, psutil
    implementation takes precedence:

    * send_signal()
    * terminate()
    * kill()

    This is done in order to avoid killing another process in case its
    PID has been reused, fixing BPO-6973.

    >>> import psutil
    >>> from subprocess import PIPE
    >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
    >>> p.name()
    'python'
    >>> p.uids()
    user(real=1000, effective=1000, saved=1000)
    >>> p.username()
    'giampaolo'
    >>> p.communicate()
    ('hi', None)
    >>> p.terminate()
    >>> p.wait(timeout=2)
    0
    >>>
    """

    def __init__(self, *args, **kwargs):
        # Explicitly avoid to raise NoSuchProcess in case the process
        # spawned by subprocess.Popen terminates too quickly, see:
        # https://github.com/giampaolo/psutil/issues/193
        self.__subproc = subprocess.Popen(*args, **kwargs)
        self._init(self.__subproc.pid, _ignore_nsp=True)

    def __dir__(self):
        # Expose the attributes of both this class and the wrapped
        # subprocess.Popen instance.
        return sorted(set(dir(Popen) + dir(subprocess.Popen)))

    def __enter__(self):
        if hasattr(self.__subproc, '__enter__'):
            self.__subproc.__enter__()
        return self

    def __exit__(self, *args, **kwargs):
        if hasattr(self.__subproc, '__exit__'):
            # Delegate to subprocess.Popen's own context-manager exit.
            return self.__subproc.__exit__(*args, **kwargs)
        else:
            # Fallback: close the pipes and reap the process ourselves.
            if self.stdout:
                self.stdout.close()
            if self.stderr:
                self.stderr.close()
            try:
                # Flushing a BufferedWriter may raise an error.
                if self.stdin:
                    self.stdin.close()
            finally:
                # Wait for the process to terminate, to avoid zombies.
                self.wait()

    def __getattribute__(self, name):
        # Attribute lookup order: this instance/class first, then the
        # wrapped subprocess.Popen instance.
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            try:
                return object.__getattribute__(self.__subproc, name)
            except AttributeError:
                msg = "%s instance has no attribute '%s'" % (
                    self.__class__.__name__,
                    name,
                )
                raise AttributeError(msg)

    def wait(self, timeout=None):
        # Return the cached exit code if the process already terminated;
        # otherwise wait via psutil and mirror the result onto the
        # subprocess.Popen instance so .returncode stays consistent.
        if self.__subproc.returncode is not None:
            return self.__subproc.returncode
        ret = super(Popen, self).wait(timeout)  # noqa
        self.__subproc.returncode = ret
        return ret
1454
+
1455
+
1456
+ # =====================================================================
1457
+ # --- system processes related functions
1458
+ # =====================================================================
1459
+
1460
+
1461
def pids():
    """Return a sorted list of the PIDs currently running on the
    system, remembering the lowest one for later reuse checks.
    """
    global _LOWEST_PID
    all_pids = sorted(_psplatform.pids())
    _LOWEST_PID = all_pids[0]
    return all_pids
1467
+
1468
+
1469
def pid_exists(pid):
    """Return True if given PID exists in the current process list.
    This is faster than doing "pid in psutil.pids()" and
    should be preferred.
    """
    # Guard clauses handle the special PIDs up front.
    if pid < 0:
        return False
    if POSIX and pid == 0:
        # On POSIX we use os.kill() to determine PID existence.
        # According to "man 2 kill" PID 0 has a special meaning
        # though: it refers to <<every process in the process
        # group of the calling process>> and that is not we want
        # to do here, so fall back on the full PID list.
        return pid in pids()
    return _psplatform.pid_exists(pid)
1485
+
1486
+
1487
+ _pmap = {}
1488
+ _pids_reused = set()
1489
+
1490
+
1491
def process_iter(attrs=None, ad_value=None):
    """Return a generator yielding a Process instance for all
    running processes.

    Every new Process instance is only created once and then cached
    into an internal table which is updated every time this is used.
    Cache can optionally be cleared via `process_iter.cache_clear()`.

    The sorting order in which processes are yielded is based on
    their PIDs.

    *attrs* and *ad_value* have the same meaning as in
    Process.as_dict(). If *attrs* is specified as_dict() is called
    and the resulting dict is stored as a 'info' attribute attached
    to returned Process instance.
    If *attrs* is an empty list it will retrieve all process info
    (slow).
    """
    global _pmap

    def add(pid):
        # Create a new Process instance and cache it.
        proc = Process(pid)
        pmap[proc.pid] = proc
        return proc

    def remove(pid):
        pmap.pop(pid, None)

    # Work on a copy of the cache; it is swapped back into the module
    # global in the "finally" clause below.
    pmap = _pmap.copy()
    a = set(pids())
    b = set(pmap.keys())
    new_pids = a - b
    gone_pids = b - a
    for pid in gone_pids:
        remove(pid)
    # Drop cached instances whose PID was detected as reused so a
    # fresh Process is created for the new process.
    while _pids_reused:
        pid = _pids_reused.pop()
        debug("refreshing Process instance for reused PID %s" % pid)
        remove(pid)
    try:
        # New PIDs are paired with None (via dict.fromkeys) and their
        # Process instances are created lazily inside the loop.
        ls = sorted(list(pmap.items()) + list(dict.fromkeys(new_pids).items()))
        for pid, proc in ls:
            try:
                if proc is None:  # new process
                    proc = add(pid)
                if attrs is not None:
                    proc.info = proc.as_dict(attrs=attrs, ad_value=ad_value)
                yield proc
            except NoSuchProcess:
                remove(pid)
    finally:
        _pmap = pmap
1543
+
1544
+
1545
+ process_iter.cache_clear = lambda: _pmap.clear() # noqa
1546
+ process_iter.cache_clear.__doc__ = "Clear process_iter() internal cache."
1547
+
1548
+
1549
def wait_procs(procs, timeout=None, callback=None):
    """Convenience function which waits for a list of processes to
    terminate.

    Return a (gone, alive) tuple indicating which processes
    are gone and which ones are still alive.

    The gone ones will have a new *returncode* attribute indicating
    process exit status (may be None).

    *callback* is a function which gets called every time a process
    terminates (a Process instance is passed as callback argument).

    Function will return as soon as all processes terminate or when
    *timeout* occurs.
    Differently from Process.wait() it will not raise TimeoutExpired if
    *timeout* occurs.

    Typical use case is:

    - send SIGTERM to a list of processes
    - give them some time to terminate
    - send SIGKILL to those ones which are still alive

    Example:

    >>> def on_terminate(proc):
    ...     print("process {} terminated".format(proc))
    ...
    >>> for p in procs:
    ...    p.terminate()
    ...
    >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
    >>> for p in alive:
    ...     p.kill()
    """

    def check_gone(proc, timeout):
        # Wait on a single process; on confirmed termination move it to
        # the "gone" set and fire the callback.
        try:
            returncode = proc.wait(timeout=timeout)
        except TimeoutExpired:
            pass
        except _SubprocessTimeoutExpired:
            pass
        else:
            if returncode is not None or not proc.is_running():
                # Set new Process instance attribute.
                proc.returncode = returncode
                gone.add(proc)
                if callback is not None:
                    callback(proc)

    if timeout is not None and not timeout >= 0:
        msg = "timeout must be a positive integer, got %s" % timeout
        raise ValueError(msg)
    gone = set()
    alive = set(procs)
    if callback is not None and not callable(callback):
        msg = "callback %r is not a callable" % callback
        raise TypeError(msg)
    if timeout is not None:
        deadline = _timer() + timeout

    while alive:
        if timeout is not None and timeout <= 0:
            break
        for proc in alive:
            # Make sure that every complete iteration (all processes)
            # will last max 1 sec.
            # We do this because we don't want to wait too long on a
            # single process: in case it terminates too late other
            # processes may disappear in the meantime and their PID
            # reused.
            max_timeout = 1.0 / len(alive)
            if timeout is not None:
                timeout = min((deadline - _timer()), max_timeout)
                if timeout <= 0:
                    break
                check_gone(proc, timeout)
            else:
                check_gone(proc, max_timeout)
        alive = alive - gone  # noqa PLR6104

    if alive:
        # Last attempt over processes survived so far.
        # timeout == 0 won't make this function wait any further.
        for proc in alive:
            check_gone(proc, 0)
        alive = alive - gone  # noqa: PLR6104

    return (list(gone), list(alive))
1640
+
1641
+
1642
+ # =====================================================================
1643
+ # --- CPU related functions
1644
+ # =====================================================================
1645
+
1646
+
1647
def cpu_count(logical=True):
    """Return the number of logical CPUs in the system (same as
    os.cpu_count() in Python 3.4).

    If *logical* is False return the number of physical cores only
    (e.g. hyper thread CPUs are excluded).

    Return None if undetermined.

    The return value is cached after first call.
    If desired cache can be cleared like this:

    >>> psutil.cpu_count.cache_clear()
    """
    count = (
        _psplatform.cpu_count_logical()
        if logical
        else _psplatform.cpu_count_cores()
    )
    # Normalize nonsensical results (zero or negative) to None.
    if count is not None and count < 1:
        return None
    return count
1668
+
1669
+
1670
def cpu_times(percpu=False):
    """Return system-wide CPU times as a namedtuple.
    Every CPU time represents the seconds the CPU has spent in the
    given mode. The namedtuple's fields availability varies depending on the
    platform:

    - user
    - system
    - idle
    - nice (UNIX)
    - iowait (Linux)
    - irq (Linux, FreeBSD)
    - softirq (Linux)
    - steal (Linux >= 2.6.11)
    - guest (Linux >= 2.6.24)
    - guest_nice (Linux >= 3.2.0)

    When *percpu* is True return a list of namedtuples for each CPU.
    First element of the list refers to first CPU, second element
    to second CPU and so on.
    The order of the list is consistent across calls.
    """
    return _psplatform.per_cpu_times() if percpu else _psplatform.cpu_times()
1696
+
1697
+
1698
# Prime the per-thread sample caches used by cpu_percent(), keyed by
# thread ident, with a first reading taken at import time.
try:
    _last_cpu_times = {threading.current_thread().ident: cpu_times()}
except Exception:  # noqa: BLE001
    # Don't want to crash at import time.
    _last_cpu_times = {}

try:
    _last_per_cpu_times = {
        threading.current_thread().ident: cpu_times(percpu=True)
    }
except Exception:  # noqa: BLE001
    # Don't want to crash at import time.
    _last_per_cpu_times = {}
1711
+
1712
+
1713
def _cpu_tot_time(times):
    """Given a cpu_time() ntuple return the total CPU time, idle time
    included.
    """
    total = sum(times)
    if LINUX:
        # On Linux guest times are already accounted in "user" or
        # "nice" times, so we subtract them from total.
        # Htop does the same. References:
        # https://github.com/giampaolo/psutil/pull/940
        # http://unix.stackexchange.com/questions/178045
        # https://github.com/torvalds/linux/blob/
        #     447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/
        #     cputime.c#L158
        total -= getattr(times, "guest", 0)  # Linux 2.6.24+
        total -= getattr(times, "guest_nice", 0)  # Linux 3.2.0+
    return total
1730
+
1731
+
1732
def _cpu_busy_time(times):
    """Given a cpu_time() ntuple return the busy (non-idle) CPU time."""
    # Linux: "iowait" is time during which the CPU does not do anything
    # (waits for IO to complete). On Linux IO wait is *not* accounted
    # in "idle" time so we subtract it. Htop does the same.
    # References:
    # https://github.com/torvalds/linux/blob/
    #     447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/cputime.c#L244
    return _cpu_tot_time(times) - times.idle - getattr(times, "iowait", 0)
1746
+
1747
+
1748
def _cpu_times_deltas(t1, t2):
    """Return a scputimes namedtuple with the per-field differences
    between two cpu_times() samples, clamped at zero.
    """
    assert t1._fields == t2._fields, (t1, t2)
    # CPU times are always supposed to increase over time or at least
    # remain the same, yet on some platforms they occasionally decrease
    # (see issues #392, #645, #1210). Trim negative deltas to zero to
    # ignore decreasing fields; top does the same. Reference:
    # https://gitlab.com/procps-ng/procps/blob/v3.3.12/top/top.c#L5063
    deltas = [
        max(0, getattr(t2, field) - getattr(t1, field))
        for field in _psplatform.scputimes._fields
    ]
    return _psplatform.scputimes(*deltas)
1767
+
1768
+
1769
def cpu_percent(interval=None, percpu=False):
    """Return a float representing the current system-wide CPU
    utilization as a percentage.

    When *interval* is > 0.0 compares system CPU times elapsed before
    and after the interval (blocking).

    When *interval* is 0.0 or None compares system CPU times elapsed
    since last call or module import, returning immediately (non
    blocking). That means the first time this is called it will
    return a meaningless 0.0 value which you should ignore.
    In this case is recommended for accuracy that this function be
    called with at least 0.1 seconds between calls.

    When *percpu* is True returns a list of floats representing the
    utilization as a percentage for each CPU.
    First element of the list refers to first CPU, second element
    to second CPU and so on.
    The order of the list is consistent across calls.

    Examples:

      >>> # blocking, system-wide
      >>> psutil.cpu_percent(interval=1)
      2.0
      >>>
      >>> # blocking, per-cpu
      >>> psutil.cpu_percent(interval=1, percpu=True)
      [2.0, 1.0]
      >>>
      >>> # non-blocking (percentage since last call)
      >>> psutil.cpu_percent(interval=None)
      2.9
      >>>
    """
    # Samples are cached per-thread so concurrent callers don't clobber
    # each other's reference readings.
    tid = threading.current_thread().ident
    blocking = interval is not None and interval > 0.0
    if interval is not None and interval < 0:
        msg = "interval is not positive (got %r)" % interval
        raise ValueError(msg)

    def calculate(t1, t2):
        # Percentage of busy (non-idle) time between the two samples.
        times_delta = _cpu_times_deltas(t1, t2)
        all_delta = _cpu_tot_time(times_delta)
        busy_delta = _cpu_busy_time(times_delta)

        try:
            busy_perc = (busy_delta / all_delta) * 100
        except ZeroDivisionError:
            # No time elapsed between the samples.
            return 0.0
        else:
            return round(busy_perc, 1)

    # system-wide usage
    if not percpu:
        if blocking:
            t1 = cpu_times()
            time.sleep(interval)
        else:
            t1 = _last_cpu_times.get(tid) or cpu_times()
        _last_cpu_times[tid] = cpu_times()
        return calculate(t1, _last_cpu_times[tid])
    # per-cpu usage
    else:
        ret = []
        if blocking:
            tot1 = cpu_times(percpu=True)
            time.sleep(interval)
        else:
            tot1 = _last_per_cpu_times.get(tid) or cpu_times(percpu=True)
        _last_per_cpu_times[tid] = cpu_times(percpu=True)
        for t1, t2 in zip(tot1, _last_per_cpu_times[tid]):
            ret.append(calculate(t1, t2))
        return ret
1843
+
1844
+
1845
+ # Use a separate dict for cpu_times_percent(), so it's independent from
1846
+ # cpu_percent() and they can both be used within the same program.
1847
+ _last_cpu_times_2 = _last_cpu_times.copy()
1848
+ _last_per_cpu_times_2 = _last_per_cpu_times.copy()
1849
+
1850
+
1851
def cpu_times_percent(interval=None, percpu=False):
    """Same as cpu_percent() but provides utilization percentages
    for each specific CPU time as is returned by cpu_times().
    For instance, on Linux we'll get:

      >>> cpu_times_percent()
      cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
                 irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
      >>>

    *interval* and *percpu* arguments have the same meaning as in
    cpu_percent().
    """
    # Samples are cached per-thread (in a dict separate from the one
    # cpu_percent() uses) so both functions can be used independently.
    tid = threading.current_thread().ident
    blocking = interval is not None and interval > 0.0
    if interval is not None and interval < 0:
        msg = "interval is not positive (got %r)" % interval
        raise ValueError(msg)

    def calculate(t1, t2):
        nums = []
        times_delta = _cpu_times_deltas(t1, t2)
        all_delta = _cpu_tot_time(times_delta)
        # "scale" is the value to multiply each delta with to get percentages.
        # We use "max" to avoid division by zero (if all_delta is 0, then all
        # fields are 0 so percentages will be 0 too. all_delta cannot be a
        # fraction because cpu times are integers)
        scale = 100.0 / max(1, all_delta)
        for field_delta in times_delta:
            field_perc = field_delta * scale
            field_perc = round(field_perc, 1)
            # make sure we don't return negative values or values over 100%
            field_perc = min(max(0.0, field_perc), 100.0)
            nums.append(field_perc)
        return _psplatform.scputimes(*nums)

    # system-wide usage
    if not percpu:
        if blocking:
            t1 = cpu_times()
            time.sleep(interval)
        else:
            t1 = _last_cpu_times_2.get(tid) or cpu_times()
        _last_cpu_times_2[tid] = cpu_times()
        return calculate(t1, _last_cpu_times_2[tid])
    # per-cpu usage
    else:
        ret = []
        if blocking:
            tot1 = cpu_times(percpu=True)
            time.sleep(interval)
        else:
            tot1 = _last_per_cpu_times_2.get(tid) or cpu_times(percpu=True)
        _last_per_cpu_times_2[tid] = cpu_times(percpu=True)
        for t1, t2 in zip(tot1, _last_per_cpu_times_2[tid]):
            ret.append(calculate(t1, t2))
        return ret
1908
+
1909
+
1910
def cpu_stats():
    """Return CPU statistics."""
    # Thin wrapper; the namedtuple fields are platform-dependent (see
    # the platform module's cpu_stats implementation).
    return _psplatform.cpu_stats()
1913
+
1914
+
1915
if hasattr(_psplatform, "cpu_freq"):

    def cpu_freq(percpu=False):
        """Return CPU frequency as a namedtuple including current,
        min and max frequency expressed in Mhz.

        If *percpu* is True and the system supports per-cpu frequency
        retrieval (Linux only) a list of frequencies is returned for
        each CPU. If not a list with one element is returned.
        """
        ret = _psplatform.cpu_freq()
        if percpu:
            return ret
        else:
            # Average the per-CPU readings into a single namedtuple.
            num_cpus = float(len(ret))
            if num_cpus == 0:
                return None
            elif num_cpus == 1:
                return ret[0]
            else:
                currs, mins, maxs = 0.0, 0.0, 0.0
                set_none = False
                for cpu in ret:
                    currs += cpu.current
                    # On Linux if /proc/cpuinfo is used min/max are set
                    # to None.
                    if LINUX and cpu.min is None:
                        set_none = True
                        continue
                    mins += cpu.min
                    maxs += cpu.max

                current = currs / num_cpus

                # If any CPU lacked min/max info report them as None
                # rather than a partial average.
                if set_none:
                    min_ = max_ = None
                else:
                    min_ = mins / num_cpus
                    max_ = maxs / num_cpus

                return _common.scpufreq(current, min_, max_)

    __all__.append("cpu_freq")
1958
+
1959
+
1960
if hasattr(os, "getloadavg") or hasattr(_psplatform, "getloadavg"):
    # Perform this hasattr check once on import time to either use the
    # platform based code or proxy straight from the os module.
    # os.getloadavg is preferred when available; otherwise the platform
    # module supplies its own implementation.
    if hasattr(os, "getloadavg"):
        getloadavg = os.getloadavg
    else:
        getloadavg = _psplatform.getloadavg

    __all__.append("getloadavg")
1969
+
1970
+
1971
+ # =====================================================================
1972
+ # --- system memory related functions
1973
+ # =====================================================================
1974
+
1975
+
1976
def virtual_memory():
    """Return statistics about system memory usage as a namedtuple
    including the following fields, expressed in bytes:

     - total:
       total physical memory available.

     - available:
       the memory that can be given instantly to processes without the
       system going into swap.
       This is calculated by summing different memory values depending
       on the platform and it is supposed to be used to monitor actual
       memory usage in a cross platform fashion.

     - percent:
       the percentage usage calculated as (total - available) / total * 100

     - used:
       memory used, calculated differently depending on the platform and
       designed for informational purposes only:
       macOS: active + wired
       BSD: active + wired + cached
       Linux: total - free

     - free:
       memory not being used at all (zeroed) that is readily available;
       note that this doesn't reflect the actual memory available
       (use 'available' instead)

    Platform-specific fields:

     - active (UNIX):
       memory currently in use or very recently used, and so it is in RAM.

     - inactive (UNIX):
       memory that is marked as not used.

     - buffers (BSD, Linux):
       cache for things like file system metadata.

     - cached (BSD, macOS):
       cache for various things.

     - wired (macOS, BSD):
       memory that is marked to always stay in RAM. It is never moved to disk.

     - shared (BSD):
       memory that may be simultaneously accessed by multiple processes.

    The sum of 'used' and 'available' does not necessarily equal total.
    On Windows 'available' and 'free' are the same.
    """
    global _TOTAL_PHYMEM
    mem = _psplatform.virtual_memory()
    # Remember total RAM for later use in Process.memory_percent().
    _TOTAL_PHYMEM = mem.total
    return mem
2033
+
2034
+
2035
def swap_memory():
    """Return system swap memory statistics as a namedtuple including
    the following fields:

     - total:   total swap memory in bytes
     - used:    used swap memory in bytes
     - free:    free swap memory in bytes
     - percent: the percentage usage
     - sin:     no. of bytes the system has swapped in from disk (cumulative)
     - sout:    no. of bytes the system has swapped out from disk (cumulative)

    'sin' and 'sout' on Windows are meaningless and always set to 0.
    """
    # Delegated entirely to the platform-specific implementation.
    return _psplatform.swap_memory()
2049
+
2050
+
2051
+ # =====================================================================
2052
+ # --- disks/partitions related functions
2053
+ # =====================================================================
2054
+
2055
+
2056
def disk_usage(path):
    """Return disk usage statistics about the given *path* as a
    namedtuple including total, used and free space expressed in bytes
    plus the percentage usage.
    """
    # Delegated entirely to the platform-specific implementation.
    return _psplatform.disk_usage(path)
2062
+
2063
+
2064
def disk_partitions(all=False):
    """Return mounted partitions as a list of
    (device, mountpoint, fstype, opts) namedtuple.
    'opts' field is a raw string separated by commas indicating mount
    options which may vary depending on the platform.

    If *all* parameter is False return physical devices only and ignore
    all others.
    """
    # Delegated entirely to the platform-specific implementation.
    return _psplatform.disk_partitions(all)
2074
+
2075
+
2076
def disk_io_counters(perdisk=False, nowrap=True):
    """Return system disk I/O statistics as a namedtuple including
    the following fields:

     - read_count:  number of reads
     - write_count: number of writes
     - read_bytes:  number of bytes read
     - write_bytes: number of bytes written
     - read_time:   time spent reading from disk (in ms)
     - write_time:  time spent writing to disk (in ms)

    Platform specific:

     - busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms)
     - read_merged_count (Linux): number of merged reads
     - write_merged_count (Linux): number of merged writes

    If *perdisk* is True return the same information for every
    physical disk installed on the system as a dictionary
    with partition names as the keys and the namedtuple
    described above as the values.

    If *nowrap* is True it detects and adjust the numbers which overflow
    and wrap (restart from 0) and add "old value" to "new value" so that
    the returned numbers will always be increasing or remain the same,
    but never decrease.
    "disk_io_counters.cache_clear()" can be used to invalidate the
    cache.

    On recent Windows versions 'diskperf -y' command may need to be
    executed first otherwise this function won't find any disk.
    """
    # Only the Linux implementation takes a perdisk argument.
    kwargs = dict(perdisk=perdisk) if LINUX else {}
    rawdict = _psplatform.disk_io_counters(**kwargs)
    if not rawdict:
        return {} if perdisk else None
    if nowrap:
        # Adjust counters that wrapped around since the previous call.
        rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters')
    nt = getattr(_psplatform, "sdiskio", _common.sdiskio)
    if not perdisk:
        # Sum every field across all disks into a single namedtuple.
        return nt(*(sum(x) for x in zip(*rawdict.values())))
    return {disk: nt(*fields) for disk, fields in rawdict.items()}
2121
+
2122
+
2123
+ disk_io_counters.cache_clear = functools.partial(
2124
+ _wrap_numbers.cache_clear, 'psutil.disk_io_counters'
2125
+ )
2126
+ disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
2127
+
2128
+
2129
+ # =====================================================================
2130
+ # --- network related functions
2131
+ # =====================================================================
2132
+
2133
+
2134
+ def net_io_counters(pernic=False, nowrap=True):
2135
+ """Return network I/O statistics as a namedtuple including
2136
+ the following fields:
2137
+
2138
+ - bytes_sent: number of bytes sent
2139
+ - bytes_recv: number of bytes received
2140
+ - packets_sent: number of packets sent
2141
+ - packets_recv: number of packets received
2142
+ - errin: total number of errors while receiving
2143
+ - errout: total number of errors while sending
2144
+ - dropin: total number of incoming packets which were dropped
2145
+ - dropout: total number of outgoing packets which were dropped
2146
+ (always 0 on macOS and BSD)
2147
+
2148
+ If *pernic* is True return the same information for every
2149
+ network interface installed on the system as a dictionary
2150
+ with network interface names as the keys and the namedtuple
2151
+ described above as the values.
2152
+
2153
+ If *nowrap* is True it detects and adjust the numbers which overflow
2154
+ and wrap (restart from 0) and add "old value" to "new value" so that
2155
+ the returned numbers will always be increasing or remain the same,
2156
+ but never decrease.
2157
+ "net_io_counters.cache_clear()" can be used to invalidate the
2158
+ cache.
2159
+ """
2160
+ rawdict = _psplatform.net_io_counters()
2161
+ if not rawdict:
2162
+ return {} if pernic else None
2163
+ if nowrap:
2164
+ rawdict = _wrap_numbers(rawdict, 'psutil.net_io_counters')
2165
+ if pernic:
2166
+ for nic, fields in rawdict.items():
2167
+ rawdict[nic] = _common.snetio(*fields)
2168
+ return rawdict
2169
+ else:
2170
+ return _common.snetio(*[sum(x) for x in zip(*rawdict.values())])
2171
+
2172
+
2173
+ net_io_counters.cache_clear = functools.partial(
2174
+ _wrap_numbers.cache_clear, 'psutil.net_io_counters'
2175
+ )
2176
+ net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
2177
+
2178
+
2179
+ def net_connections(kind='inet'):
2180
+ """Return system-wide socket connections as a list of
2181
+ (fd, family, type, laddr, raddr, status, pid) namedtuples.
2182
+ In case of limited privileges 'fd' and 'pid' may be set to -1
2183
+ and None respectively.
2184
+ The *kind* parameter filters for connections that fit the
2185
+ following criteria:
2186
+
2187
+ +------------+----------------------------------------------------+
2188
+ | Kind Value | Connections using |
2189
+ +------------+----------------------------------------------------+
2190
+ | inet | IPv4 and IPv6 |
2191
+ | inet4 | IPv4 |
2192
+ | inet6 | IPv6 |
2193
+ | tcp | TCP |
2194
+ | tcp4 | TCP over IPv4 |
2195
+ | tcp6 | TCP over IPv6 |
2196
+ | udp | UDP |
2197
+ | udp4 | UDP over IPv4 |
2198
+ | udp6 | UDP over IPv6 |
2199
+ | unix | UNIX socket (both UDP and TCP protocols) |
2200
+ | all | the sum of all the possible families and protocols |
2201
+ +------------+----------------------------------------------------+
2202
+
2203
+ On macOS this function requires root privileges.
2204
+ """
2205
+ return _psplatform.net_connections(kind)
2206
+
2207
+
2208
+ def net_if_addrs():
2209
+ """Return the addresses associated to each NIC (network interface
2210
+ card) installed on the system as a dictionary whose keys are the
2211
+ NIC names and value is a list of namedtuples for each address
2212
+ assigned to the NIC. Each namedtuple includes 5 fields:
2213
+
2214
+ - family: can be either socket.AF_INET, socket.AF_INET6 or
2215
+ psutil.AF_LINK, which refers to a MAC address.
2216
+ - address: is the primary address and it is always set.
2217
+ - netmask: and 'broadcast' and 'ptp' may be None.
2218
+ - ptp: stands for "point to point" and references the
2219
+ destination address on a point to point interface
2220
+ (typically a VPN).
2221
+ - broadcast: and *ptp* are mutually exclusive.
2222
+
2223
+ Note: you can have more than one address of the same family
2224
+ associated with each interface.
2225
+ """
2226
+ has_enums = _PY3
2227
+ if has_enums:
2228
+ import socket
2229
+ rawlist = _psplatform.net_if_addrs()
2230
+ rawlist.sort(key=lambda x: x[1]) # sort by family
2231
+ ret = collections.defaultdict(list)
2232
+ for name, fam, addr, mask, broadcast, ptp in rawlist:
2233
+ if has_enums:
2234
+ try:
2235
+ fam = socket.AddressFamily(fam)
2236
+ except ValueError:
2237
+ if WINDOWS and fam == -1:
2238
+ fam = _psplatform.AF_LINK
2239
+ elif (
2240
+ hasattr(_psplatform, "AF_LINK")
2241
+ and fam == _psplatform.AF_LINK
2242
+ ):
2243
+ # Linux defines AF_LINK as an alias for AF_PACKET.
2244
+ # We re-set the family here so that repr(family)
2245
+ # will show AF_LINK rather than AF_PACKET
2246
+ fam = _psplatform.AF_LINK
2247
+ if fam == _psplatform.AF_LINK:
2248
+ # The underlying C function may return an incomplete MAC
2249
+ # address in which case we fill it with null bytes, see:
2250
+ # https://github.com/giampaolo/psutil/issues/786
2251
+ separator = ":" if POSIX else "-"
2252
+ while addr.count(separator) < 5:
2253
+ addr += "%s00" % separator
2254
+ ret[name].append(_common.snicaddr(fam, addr, mask, broadcast, ptp))
2255
+ return dict(ret)
2256
+
2257
+
2258
+ def net_if_stats():
2259
+ """Return information about each NIC (network interface card)
2260
+ installed on the system as a dictionary whose keys are the
2261
+ NIC names and value is a namedtuple with the following fields:
2262
+
2263
+ - isup: whether the interface is up (bool)
2264
+ - duplex: can be either NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or
2265
+ NIC_DUPLEX_UNKNOWN
2266
+ - speed: the NIC speed expressed in mega bits (MB); if it can't
2267
+ be determined (e.g. 'localhost') it will be set to 0.
2268
+ - mtu: the maximum transmission unit expressed in bytes.
2269
+ """
2270
+ return _psplatform.net_if_stats()
2271
+
2272
+
2273
+ # =====================================================================
2274
+ # --- sensors
2275
+ # =====================================================================
2276
+
2277
+
2278
+ # Linux, macOS
2279
+ if hasattr(_psplatform, "sensors_temperatures"):
2280
+
2281
+ def sensors_temperatures(fahrenheit=False):
2282
+ """Return hardware temperatures. Each entry is a namedtuple
2283
+ representing a certain hardware sensor (it may be a CPU, an
2284
+ hard disk or something else, depending on the OS and its
2285
+ configuration).
2286
+ All temperatures are expressed in celsius unless *fahrenheit*
2287
+ is set to True.
2288
+ """
2289
+
2290
+ def convert(n):
2291
+ if n is not None:
2292
+ return (float(n) * 9 / 5) + 32 if fahrenheit else n
2293
+
2294
+ ret = collections.defaultdict(list)
2295
+ rawdict = _psplatform.sensors_temperatures()
2296
+
2297
+ for name, values in rawdict.items():
2298
+ while values:
2299
+ label, current, high, critical = values.pop(0)
2300
+ current = convert(current)
2301
+ high = convert(high)
2302
+ critical = convert(critical)
2303
+
2304
+ if high and not critical:
2305
+ critical = high
2306
+ elif critical and not high:
2307
+ high = critical
2308
+
2309
+ ret[name].append(
2310
+ _common.shwtemp(label, current, high, critical)
2311
+ )
2312
+
2313
+ return dict(ret)
2314
+
2315
+ __all__.append("sensors_temperatures")
2316
+
2317
+
2318
+ # Linux
2319
+ if hasattr(_psplatform, "sensors_fans"):
2320
+
2321
+ def sensors_fans():
2322
+ """Return fans speed. Each entry is a namedtuple
2323
+ representing a certain hardware sensor.
2324
+ All speed are expressed in RPM (rounds per minute).
2325
+ """
2326
+ return _psplatform.sensors_fans()
2327
+
2328
+ __all__.append("sensors_fans")
2329
+
2330
+
2331
+ # Linux, Windows, FreeBSD, macOS
2332
+ if hasattr(_psplatform, "sensors_battery"):
2333
+
2334
+ def sensors_battery():
2335
+ """Return battery information. If no battery is installed
2336
+ returns None.
2337
+
2338
+ - percent: battery power left as a percentage.
2339
+ - secsleft: a rough approximation of how many seconds are left
2340
+ before the battery runs out of power. May be
2341
+ POWER_TIME_UNLIMITED or POWER_TIME_UNLIMITED.
2342
+ - power_plugged: True if the AC power cable is connected.
2343
+ """
2344
+ return _psplatform.sensors_battery()
2345
+
2346
+ __all__.append("sensors_battery")
2347
+
2348
+
2349
+ # =====================================================================
2350
+ # --- other system related functions
2351
+ # =====================================================================
2352
+
2353
+
2354
+ def boot_time():
2355
+ """Return the system boot time expressed in seconds since the epoch."""
2356
+ # Note: we are not caching this because it is subject to
2357
+ # system clock updates.
2358
+ return _psplatform.boot_time()
2359
+
2360
+
2361
+ def users():
2362
+ """Return users currently connected on the system as a list of
2363
+ namedtuples including the following fields.
2364
+
2365
+ - user: the name of the user
2366
+ - terminal: the tty or pseudo-tty associated with the user, if any.
2367
+ - host: the host name associated with the entry, if any.
2368
+ - started: the creation time as a floating point number expressed in
2369
+ seconds since the epoch.
2370
+ """
2371
+ return _psplatform.users()
2372
+
2373
+
2374
+ # =====================================================================
2375
+ # --- Windows services
2376
+ # =====================================================================
2377
+
2378
+
2379
+ if WINDOWS:
2380
+
2381
+ def win_service_iter():
2382
+ """Return a generator yielding a WindowsService instance for all
2383
+ Windows services installed.
2384
+ """
2385
+ return _psplatform.win_service_iter()
2386
+
2387
+ def win_service_get(name):
2388
+ """Get a Windows service by *name*.
2389
+ Raise NoSuchProcess if no service with such name exists.
2390
+ """
2391
+ return _psplatform.win_service_get(name)
2392
+
2393
+
2394
+ # =====================================================================
2395
+
2396
+
2397
+ def _set_debug(value):
2398
+ """Enable or disable PSUTIL_DEBUG option, which prints debugging
2399
+ messages to stderr.
2400
+ """
2401
+ import psutil._common
2402
+
2403
+ psutil._common.PSUTIL_DEBUG = bool(value)
2404
+ _psplatform.cext.set_debug(bool(value))
2405
+
2406
+
2407
+ def test(): # pragma: no cover
2408
+ from ._common import bytes2human
2409
+ from ._compat import get_terminal_size
2410
+
2411
+ today_day = datetime.date.today()
2412
+ # fmt: off
2413
+ templ = "%-10s %5s %5s %7s %7s %5s %6s %6s %6s %s"
2414
+ attrs = ['pid', 'memory_percent', 'name', 'cmdline', 'cpu_times',
2415
+ 'create_time', 'memory_info', 'status', 'nice', 'username']
2416
+ print(templ % ("USER", "PID", "%MEM", "VSZ", "RSS", "NICE", # NOQA
2417
+ "STATUS", "START", "TIME", "CMDLINE"))
2418
+ # fmt: on
2419
+ for p in process_iter(attrs, ad_value=None):
2420
+ if p.info['create_time']:
2421
+ ctime = datetime.datetime.fromtimestamp(p.info['create_time'])
2422
+ if ctime.date() == today_day:
2423
+ ctime = ctime.strftime("%H:%M")
2424
+ else:
2425
+ ctime = ctime.strftime("%b%d")
2426
+ else:
2427
+ ctime = ''
2428
+ if p.info['cpu_times']:
2429
+ cputime = time.strftime(
2430
+ "%M:%S", time.localtime(sum(p.info['cpu_times']))
2431
+ )
2432
+ else:
2433
+ cputime = ''
2434
+
2435
+ user = p.info['username'] or ''
2436
+ if not user and POSIX:
2437
+ try:
2438
+ user = p.uids()[0]
2439
+ except Error:
2440
+ pass
2441
+ if user and WINDOWS and '\\' in user:
2442
+ user = user.split('\\')[1]
2443
+ user = user[:9]
2444
+ vms = (
2445
+ bytes2human(p.info['memory_info'].vms)
2446
+ if p.info['memory_info'] is not None
2447
+ else ''
2448
+ )
2449
+ rss = (
2450
+ bytes2human(p.info['memory_info'].rss)
2451
+ if p.info['memory_info'] is not None
2452
+ else ''
2453
+ )
2454
+ memp = (
2455
+ round(p.info['memory_percent'], 1)
2456
+ if p.info['memory_percent'] is not None
2457
+ else ''
2458
+ )
2459
+ nice = int(p.info['nice']) if p.info['nice'] else ''
2460
+ if p.info['cmdline']:
2461
+ cmdline = ' '.join(p.info['cmdline'])
2462
+ else:
2463
+ cmdline = p.info['name']
2464
+ status = p.info['status'][:5] if p.info['status'] else ''
2465
+
2466
+ line = templ % (
2467
+ user[:10],
2468
+ p.info['pid'],
2469
+ memp,
2470
+ vms,
2471
+ rss,
2472
+ nice,
2473
+ status,
2474
+ ctime,
2475
+ cputime,
2476
+ cmdline,
2477
+ )
2478
+ print(line[: get_terminal_size()[0]]) # NOQA
2479
+
2480
+
2481
+ del memoize_when_activated, division
2482
+ if sys.version_info[0] < 3:
2483
+ del num, x # noqa
2484
+
2485
+ if __name__ == "__main__":
2486
+ test()
.venv/lib/python3.11/site-packages/psutil/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (98.3 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/__pycache__/_common.cpython-311.pyc ADDED
Binary file (38.2 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/__pycache__/_compat.cpython-311.pyc ADDED
Binary file (20.8 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/__pycache__/_psaix.cpython-311.pyc ADDED
Binary file (26.9 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/__pycache__/_psbsd.cpython-311.pyc ADDED
Binary file (38.2 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/__pycache__/_psosx.cpython-311.pyc ADDED
Binary file (23.3 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/__pycache__/_psposix.cpython-311.pyc ADDED
Binary file (7.43 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/__pycache__/_pssunos.cpython-311.pyc ADDED
Binary file (33.1 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/__pycache__/_pswindows.cpython-311.pyc ADDED
Binary file (50 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/_psposix.py ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
2
+ # Use of this source code is governed by a BSD-style license that can be
3
+ # found in the LICENSE file.
4
+
5
+ """Routines common to all posix systems."""
6
+
7
+ import glob
8
+ import os
9
+ import signal
10
+ import sys
11
+ import time
12
+
13
+ from ._common import MACOS
14
+ from ._common import TimeoutExpired
15
+ from ._common import memoize
16
+ from ._common import sdiskusage
17
+ from ._common import usage_percent
18
+ from ._compat import PY3
19
+ from ._compat import ChildProcessError
20
+ from ._compat import FileNotFoundError
21
+ from ._compat import InterruptedError
22
+ from ._compat import PermissionError
23
+ from ._compat import ProcessLookupError
24
+ from ._compat import unicode
25
+
26
+
27
+ if MACOS:
28
+ from . import _psutil_osx
29
+
30
+
31
+ if PY3:
32
+ import enum
33
+ else:
34
+ enum = None
35
+
36
+
37
+ __all__ = ['pid_exists', 'wait_pid', 'disk_usage', 'get_terminal_map']
38
+
39
+
40
+ def pid_exists(pid):
41
+ """Check whether pid exists in the current process table."""
42
+ if pid == 0:
43
+ # According to "man 2 kill" PID 0 has a special meaning:
44
+ # it refers to <<every process in the process group of the
45
+ # calling process>> so we don't want to go any further.
46
+ # If we get here it means this UNIX platform *does* have
47
+ # a process with id 0.
48
+ return True
49
+ try:
50
+ os.kill(pid, 0)
51
+ except ProcessLookupError:
52
+ return False
53
+ except PermissionError:
54
+ # EPERM clearly means there's a process to deny access to
55
+ return True
56
+ # According to "man 2 kill" possible error values are
57
+ # (EINVAL, EPERM, ESRCH)
58
+ else:
59
+ return True
60
+
61
+
62
+ # Python 3.5 signals enum (contributed by me ^^):
63
+ # https://bugs.python.org/issue21076
64
+ if enum is not None and hasattr(signal, "Signals"):
65
+ Negsignal = enum.IntEnum(
66
+ 'Negsignal', dict([(x.name, -x.value) for x in signal.Signals])
67
+ )
68
+
69
+ def negsig_to_enum(num):
70
+ """Convert a negative signal value to an enum."""
71
+ try:
72
+ return Negsignal(num)
73
+ except ValueError:
74
+ return num
75
+
76
+ else: # pragma: no cover
77
+
78
+ def negsig_to_enum(num):
79
+ return num
80
+
81
+
82
+ def wait_pid(
83
+ pid,
84
+ timeout=None,
85
+ proc_name=None,
86
+ _waitpid=os.waitpid,
87
+ _timer=getattr(time, 'monotonic', time.time), # noqa: B008
88
+ _min=min,
89
+ _sleep=time.sleep,
90
+ _pid_exists=pid_exists,
91
+ ):
92
+ """Wait for a process PID to terminate.
93
+
94
+ If the process terminated normally by calling exit(3) or _exit(2),
95
+ or by returning from main(), the return value is the positive integer
96
+ passed to *exit().
97
+
98
+ If it was terminated by a signal it returns the negated value of the
99
+ signal which caused the termination (e.g. -SIGTERM).
100
+
101
+ If PID is not a children of os.getpid() (current process) just
102
+ wait until the process disappears and return None.
103
+
104
+ If PID does not exist at all return None immediately.
105
+
106
+ If *timeout* != None and process is still alive raise TimeoutExpired.
107
+ timeout=0 is also possible (either return immediately or raise).
108
+ """
109
+ if pid <= 0:
110
+ # see "man waitpid"
111
+ msg = "can't wait for PID 0"
112
+ raise ValueError(msg)
113
+ interval = 0.0001
114
+ flags = 0
115
+ if timeout is not None:
116
+ flags |= os.WNOHANG
117
+ stop_at = _timer() + timeout
118
+
119
+ def sleep(interval):
120
+ # Sleep for some time and return a new increased interval.
121
+ if timeout is not None:
122
+ if _timer() >= stop_at:
123
+ raise TimeoutExpired(timeout, pid=pid, name=proc_name)
124
+ _sleep(interval)
125
+ return _min(interval * 2, 0.04)
126
+
127
+ # See: https://linux.die.net/man/2/waitpid
128
+ while True:
129
+ try:
130
+ retpid, status = os.waitpid(pid, flags)
131
+ except InterruptedError:
132
+ interval = sleep(interval)
133
+ except ChildProcessError:
134
+ # This has two meanings:
135
+ # - PID is not a child of os.getpid() in which case
136
+ # we keep polling until it's gone
137
+ # - PID never existed in the first place
138
+ # In both cases we'll eventually return None as we
139
+ # can't determine its exit status code.
140
+ while _pid_exists(pid):
141
+ interval = sleep(interval)
142
+ return
143
+ else:
144
+ if retpid == 0:
145
+ # WNOHANG flag was used and PID is still running.
146
+ interval = sleep(interval)
147
+ continue
148
+
149
+ if os.WIFEXITED(status):
150
+ # Process terminated normally by calling exit(3) or _exit(2),
151
+ # or by returning from main(). The return value is the
152
+ # positive integer passed to *exit().
153
+ return os.WEXITSTATUS(status)
154
+ elif os.WIFSIGNALED(status):
155
+ # Process exited due to a signal. Return the negative value
156
+ # of that signal.
157
+ return negsig_to_enum(-os.WTERMSIG(status))
158
+ # elif os.WIFSTOPPED(status):
159
+ # # Process was stopped via SIGSTOP or is being traced, and
160
+ # # waitpid() was called with WUNTRACED flag. PID is still
161
+ # # alive. From now on waitpid() will keep returning (0, 0)
162
+ # # until the process state doesn't change.
163
+ # # It may make sense to catch/enable this since stopped PIDs
164
+ # # ignore SIGTERM.
165
+ # interval = sleep(interval)
166
+ # continue
167
+ # elif os.WIFCONTINUED(status):
168
+ # # Process was resumed via SIGCONT and waitpid() was called
169
+ # # with WCONTINUED flag.
170
+ # interval = sleep(interval)
171
+ # continue
172
+ else:
173
+ # Should never happen.
174
+ raise ValueError("unknown process exit status %r" % status)
175
+
176
+
177
+ def disk_usage(path):
178
+ """Return disk usage associated with path.
179
+ Note: UNIX usually reserves 5% disk space which is not accessible
180
+ by user. In this function "total" and "used" values reflect the
181
+ total and used disk space whereas "free" and "percent" represent
182
+ the "free" and "used percent" user disk space.
183
+ """
184
+ if PY3:
185
+ st = os.statvfs(path)
186
+ else: # pragma: no cover
187
+ # os.statvfs() does not support unicode on Python 2:
188
+ # - https://github.com/giampaolo/psutil/issues/416
189
+ # - http://bugs.python.org/issue18695
190
+ try:
191
+ st = os.statvfs(path)
192
+ except UnicodeEncodeError:
193
+ if isinstance(path, unicode):
194
+ try:
195
+ path = path.encode(sys.getfilesystemencoding())
196
+ except UnicodeEncodeError:
197
+ pass
198
+ st = os.statvfs(path)
199
+ else:
200
+ raise
201
+
202
+ # Total space which is only available to root (unless changed
203
+ # at system level).
204
+ total = st.f_blocks * st.f_frsize
205
+ # Remaining free space usable by root.
206
+ avail_to_root = st.f_bfree * st.f_frsize
207
+ # Remaining free space usable by user.
208
+ avail_to_user = st.f_bavail * st.f_frsize
209
+ # Total space being used in general.
210
+ used = total - avail_to_root
211
+ if MACOS:
212
+ # see: https://github.com/giampaolo/psutil/pull/2152
213
+ used = _psutil_osx.disk_usage_used(path, used)
214
+ # Total space which is available to user (same as 'total' but
215
+ # for the user).
216
+ total_user = used + avail_to_user
217
+ # User usage percent compared to the total amount of space
218
+ # the user can use. This number would be higher if compared
219
+ # to root's because the user has less space (usually -5%).
220
+ usage_percent_user = usage_percent(used, total_user, round_=1)
221
+
222
+ # NB: the percentage is -5% than what shown by df due to
223
+ # reserved blocks that we are currently not considering:
224
+ # https://github.com/giampaolo/psutil/issues/829#issuecomment-223750462
225
+ return sdiskusage(
226
+ total=total, used=used, free=avail_to_user, percent=usage_percent_user
227
+ )
228
+
229
+
230
+ @memoize
231
+ def get_terminal_map():
232
+ """Get a map of device-id -> path as a dict.
233
+ Used by Process.terminal().
234
+ """
235
+ ret = {}
236
+ ls = glob.glob('/dev/tty*') + glob.glob('/dev/pts/*')
237
+ for name in ls:
238
+ assert name not in ret, name
239
+ try:
240
+ ret[os.stat(name).st_rdev] = name
241
+ except FileNotFoundError:
242
+ pass
243
+ return ret
.venv/lib/python3.11/site-packages/psutil/tests/__init__.py ADDED
@@ -0,0 +1,2113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
4
+ # Use of this source code is governed by a BSD-style license that can be
5
+ # found in the LICENSE file.
6
+
7
+ """Test utilities."""
8
+
9
+ from __future__ import print_function
10
+
11
+ import atexit
12
+ import contextlib
13
+ import ctypes
14
+ import errno
15
+ import functools
16
+ import gc
17
+ import os
18
+ import platform
19
+ import random
20
+ import re
21
+ import select
22
+ import shlex
23
+ import shutil
24
+ import signal
25
+ import socket
26
+ import stat
27
+ import subprocess
28
+ import sys
29
+ import tempfile
30
+ import textwrap
31
+ import threading
32
+ import time
33
+ import unittest
34
+ import warnings
35
+ from socket import AF_INET
36
+ from socket import AF_INET6
37
+ from socket import SOCK_STREAM
38
+
39
+
40
+ try:
41
+ import pytest
42
+ except ImportError:
43
+ pytest = None
44
+
45
+ import psutil
46
+ from psutil import AIX
47
+ from psutil import LINUX
48
+ from psutil import MACOS
49
+ from psutil import NETBSD
50
+ from psutil import OPENBSD
51
+ from psutil import POSIX
52
+ from psutil import SUNOS
53
+ from psutil import WINDOWS
54
+ from psutil._common import bytes2human
55
+ from psutil._common import debug
56
+ from psutil._common import memoize
57
+ from psutil._common import print_color
58
+ from psutil._common import supports_ipv6
59
+ from psutil._compat import PY3
60
+ from psutil._compat import FileExistsError
61
+ from psutil._compat import FileNotFoundError
62
+ from psutil._compat import range
63
+ from psutil._compat import super
64
+ from psutil._compat import unicode
65
+ from psutil._compat import which
66
+
67
+
68
+ try:
69
+ from unittest import mock # py3
70
+ except ImportError:
71
+ with warnings.catch_warnings():
72
+ warnings.simplefilter("ignore")
73
+ import mock # NOQA - requires "pip install mock"
74
+
75
+ if PY3:
76
+ import enum
77
+ else:
78
+ import unittest2 as unittest
79
+
80
+ enum = None
81
+
82
+ if POSIX:
83
+ from psutil._psposix import wait_pid
84
+
85
+
86
+ # fmt: off
87
+ __all__ = [
88
+ # constants
89
+ 'APPVEYOR', 'DEVNULL', 'GLOBAL_TIMEOUT', 'TOLERANCE_SYS_MEM', 'NO_RETRIES',
90
+ 'PYPY', 'PYTHON_EXE', 'PYTHON_EXE_ENV', 'ROOT_DIR', 'SCRIPTS_DIR',
91
+ 'TESTFN_PREFIX', 'UNICODE_SUFFIX', 'INVALID_UNICODE_SUFFIX',
92
+ 'CI_TESTING', 'VALID_PROC_STATUSES', 'TOLERANCE_DISK_USAGE', 'IS_64BIT',
93
+ "HAS_CPU_AFFINITY", "HAS_CPU_FREQ", "HAS_ENVIRON", "HAS_PROC_IO_COUNTERS",
94
+ "HAS_IONICE", "HAS_MEMORY_MAPS", "HAS_PROC_CPU_NUM", "HAS_RLIMIT",
95
+ "HAS_SENSORS_BATTERY", "HAS_BATTERY", "HAS_SENSORS_FANS",
96
+ "HAS_SENSORS_TEMPERATURES", "HAS_NET_CONNECTIONS_UNIX", "MACOS_11PLUS",
97
+ "MACOS_12PLUS", "COVERAGE", 'AARCH64', "QEMU_USER", "PYTEST_PARALLEL",
98
+ # subprocesses
99
+ 'pyrun', 'terminate', 'reap_children', 'spawn_testproc', 'spawn_zombie',
100
+ 'spawn_children_pair',
101
+ # threads
102
+ 'ThreadTask',
103
+ # test utils
104
+ 'unittest', 'skip_on_access_denied', 'skip_on_not_implemented',
105
+ 'retry_on_failure', 'TestMemoryLeak', 'PsutilTestCase',
106
+ 'process_namespace', 'system_namespace', 'print_sysinfo',
107
+ 'is_win_secure_system_proc', 'fake_pytest',
108
+ # fs utils
109
+ 'chdir', 'safe_rmpath', 'create_py_exe', 'create_c_exe', 'get_testfn',
110
+ # os
111
+ 'get_winver', 'kernel_version',
112
+ # sync primitives
113
+ 'call_until', 'wait_for_pid', 'wait_for_file',
114
+ # network
115
+ 'check_net_address', 'filter_proc_net_connections',
116
+ 'get_free_port', 'bind_socket', 'bind_unix_socket', 'tcp_socketpair',
117
+ 'unix_socketpair', 'create_sockets',
118
+ # compat
119
+ 'reload_module', 'import_module_by_path',
120
+ # others
121
+ 'warn', 'copyload_shared_lib', 'is_namedtuple',
122
+ ]
123
+ # fmt: on
124
+
125
+
126
+ # ===================================================================
127
+ # --- constants
128
+ # ===================================================================
129
+
130
+ # --- platforms
131
+
132
+ PYPY = '__pypy__' in sys.builtin_module_names
133
+ # whether we're running this test suite on a Continuous Integration service
134
+ APPVEYOR = 'APPVEYOR' in os.environ
135
+ GITHUB_ACTIONS = 'GITHUB_ACTIONS' in os.environ or 'CIBUILDWHEEL' in os.environ
136
+ CI_TESTING = APPVEYOR or GITHUB_ACTIONS
137
+ COVERAGE = 'COVERAGE_RUN' in os.environ
138
+ PYTEST_PARALLEL = "PYTEST_XDIST_WORKER" in os.environ # `make test-parallel`
139
+ if LINUX and GITHUB_ACTIONS:
140
+ with open('/proc/1/cmdline') as f:
141
+ QEMU_USER = "/bin/qemu-" in f.read()
142
+ else:
143
+ QEMU_USER = False
144
+ # are we a 64 bit process?
145
+ IS_64BIT = sys.maxsize > 2**32
146
+ AARCH64 = platform.machine() == "aarch64"
147
+
148
+
149
+ @memoize
150
+ def macos_version():
151
+ version_str = platform.mac_ver()[0]
152
+ version = tuple(map(int, version_str.split(".")[:2]))
153
+ if version == (10, 16):
154
+ # When built against an older macOS SDK, Python will report
155
+ # macOS 10.16 instead of the real version.
156
+ version_str = subprocess.check_output(
157
+ [
158
+ sys.executable,
159
+ "-sS",
160
+ "-c",
161
+ "import platform; print(platform.mac_ver()[0])",
162
+ ],
163
+ env={"SYSTEM_VERSION_COMPAT": "0"},
164
+ universal_newlines=True,
165
+ )
166
+ version = tuple(map(int, version_str.split(".")[:2]))
167
+ return version
168
+
169
+
170
+ if MACOS:
171
+ MACOS_11PLUS = macos_version() > (10, 15)
172
+ MACOS_12PLUS = macos_version() >= (12, 0)
173
+ else:
174
+ MACOS_11PLUS = False
175
+ MACOS_12PLUS = False
176
+
177
+
178
+ # --- configurable defaults
179
+
180
+ # how many times retry_on_failure() decorator will retry
181
+ NO_RETRIES = 10
182
+ # bytes tolerance for system-wide related tests
183
+ TOLERANCE_SYS_MEM = 5 * 1024 * 1024 # 5MB
184
+ TOLERANCE_DISK_USAGE = 10 * 1024 * 1024 # 10MB
185
+ # the timeout used in functions which have to wait
186
+ GLOBAL_TIMEOUT = 5
187
+ # be more tolerant if we're on CI in order to avoid false positives
188
+ if CI_TESTING:
189
+ NO_RETRIES *= 3
190
+ GLOBAL_TIMEOUT *= 3
191
+ TOLERANCE_SYS_MEM *= 4
192
+ TOLERANCE_DISK_USAGE *= 3
193
+
194
+ # --- file names
195
+
196
+ # Disambiguate TESTFN for parallel testing.
197
+ if os.name == 'java':
198
+ # Jython disallows @ in module names
199
+ TESTFN_PREFIX = '$psutil-%s-' % os.getpid()
200
+ else:
201
+ TESTFN_PREFIX = '@psutil-%s-' % os.getpid()
202
+ UNICODE_SUFFIX = u"-ƒőő"
203
+ # An invalid unicode string.
204
+ if PY3:
205
+ INVALID_UNICODE_SUFFIX = b"f\xc0\x80".decode('utf8', 'surrogateescape')
206
+ else:
207
+ INVALID_UNICODE_SUFFIX = "f\xc0\x80"
208
+ ASCII_FS = sys.getfilesystemencoding().lower() in {'ascii', 'us-ascii'}
209
+
210
+ # --- paths
211
+
212
+ ROOT_DIR = os.path.realpath(
213
+ os.path.join(os.path.dirname(__file__), '..', '..')
214
+ )
215
+ SCRIPTS_DIR = os.environ.get(
216
+ "PSUTIL_SCRIPTS_DIR", os.path.join(ROOT_DIR, 'scripts')
217
+ )
218
+ HERE = os.path.realpath(os.path.dirname(__file__))
219
+
220
+ # --- support
221
+
222
+ HAS_CPU_AFFINITY = hasattr(psutil.Process, "cpu_affinity")
223
+ HAS_CPU_FREQ = hasattr(psutil, "cpu_freq")
224
+ HAS_ENVIRON = hasattr(psutil.Process, "environ")
225
+ HAS_GETLOADAVG = hasattr(psutil, "getloadavg")
226
+ HAS_IONICE = hasattr(psutil.Process, "ionice")
227
+ HAS_MEMORY_MAPS = hasattr(psutil.Process, "memory_maps")
228
+ HAS_NET_CONNECTIONS_UNIX = POSIX and not SUNOS
229
+ HAS_NET_IO_COUNTERS = hasattr(psutil, "net_io_counters")
230
+ HAS_PROC_CPU_NUM = hasattr(psutil.Process, "cpu_num")
231
+ HAS_PROC_IO_COUNTERS = hasattr(psutil.Process, "io_counters")
232
+ HAS_RLIMIT = hasattr(psutil.Process, "rlimit")
233
+ HAS_SENSORS_BATTERY = hasattr(psutil, "sensors_battery")
234
+ try:
235
+ HAS_BATTERY = HAS_SENSORS_BATTERY and bool(psutil.sensors_battery())
236
+ except Exception: # noqa: BLE001
237
+ HAS_BATTERY = False
238
+ HAS_SENSORS_FANS = hasattr(psutil, "sensors_fans")
239
+ HAS_SENSORS_TEMPERATURES = hasattr(psutil, "sensors_temperatures")
240
+ HAS_THREADS = hasattr(psutil.Process, "threads")
241
+ SKIP_SYSCONS = (MACOS or AIX) and os.getuid() != 0
242
+
243
+ # --- misc
244
+
245
+
246
+ def _get_py_exe():
247
+ def attempt(exe):
248
+ try:
249
+ subprocess.check_call(
250
+ [exe, "-V"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
251
+ )
252
+ except subprocess.CalledProcessError:
253
+ return None
254
+ else:
255
+ return exe
256
+
257
+ env = os.environ.copy()
258
+
259
+ # On Windows, starting with python 3.7, virtual environments use a
260
+ # venv launcher startup process. This does not play well when
261
+ # counting spawned processes, or when relying on the PID of the
262
+ # spawned process to do some checks, e.g. connections check per PID.
263
+ # Let's use the base python in this case.
264
+ base = getattr(sys, "_base_executable", None)
265
+ if WINDOWS and sys.version_info >= (3, 7) and base is not None:
266
+ # We need to set __PYVENV_LAUNCHER__ to sys.executable for the
267
+ # base python executable to know about the environment.
268
+ env["__PYVENV_LAUNCHER__"] = sys.executable
269
+ return base, env
270
+ elif GITHUB_ACTIONS:
271
+ return sys.executable, env
272
+ elif MACOS:
273
+ exe = (
274
+ attempt(sys.executable)
275
+ or attempt(os.path.realpath(sys.executable))
276
+ or attempt(which("python%s.%s" % sys.version_info[:2]))
277
+ or attempt(psutil.Process().exe())
278
+ )
279
+ if not exe:
280
+ raise ValueError("can't find python exe real abspath")
281
+ return exe, env
282
+ else:
283
+ exe = os.path.realpath(sys.executable)
284
+ assert os.path.exists(exe), exe
285
+ return exe, env
286
+
287
+
288
+ PYTHON_EXE, PYTHON_EXE_ENV = _get_py_exe()
289
+ DEVNULL = open(os.devnull, 'r+')
290
+ atexit.register(DEVNULL.close)
291
+
292
+ VALID_PROC_STATUSES = [
293
+ getattr(psutil, x) for x in dir(psutil) if x.startswith('STATUS_')
294
+ ]
295
+ AF_UNIX = getattr(socket, "AF_UNIX", object())
296
+
297
+ _subprocesses_started = set()
298
+ _pids_started = set()
299
+
300
+
301
+ # ===================================================================
302
+ # --- threads
303
+ # ===================================================================
304
+
305
+
306
+ class ThreadTask(threading.Thread):
307
+ """A thread task which does nothing expect staying alive."""
308
+
309
+ def __init__(self):
310
+ super().__init__()
311
+ self._running = False
312
+ self._interval = 0.001
313
+ self._flag = threading.Event()
314
+
315
+ def __repr__(self):
316
+ name = self.__class__.__name__
317
+ return '<%s running=%s at %#x>' % (name, self._running, id(self))
318
+
319
+ def __enter__(self):
320
+ self.start()
321
+ return self
322
+
323
+ def __exit__(self, *args, **kwargs):
324
+ self.stop()
325
+
326
+ def start(self):
327
+ """Start thread and keep it running until an explicit
328
+ stop() request. Polls for shutdown every 'timeout' seconds.
329
+ """
330
+ if self._running:
331
+ raise ValueError("already started")
332
+ threading.Thread.start(self)
333
+ self._flag.wait()
334
+
335
+ def run(self):
336
+ self._running = True
337
+ self._flag.set()
338
+ while self._running:
339
+ time.sleep(self._interval)
340
+
341
+ def stop(self):
342
+ """Stop thread execution and and waits until it is stopped."""
343
+ if not self._running:
344
+ raise ValueError("already stopped")
345
+ self._running = False
346
+ self.join()
347
+
348
+
349
+ # ===================================================================
350
+ # --- subprocesses
351
+ # ===================================================================
352
+
353
+
354
+ def _reap_children_on_err(fun):
355
+ @functools.wraps(fun)
356
+ def wrapper(*args, **kwargs):
357
+ try:
358
+ return fun(*args, **kwargs)
359
+ except Exception:
360
+ reap_children()
361
+ raise
362
+
363
+ return wrapper
364
+
365
+
366
+ @_reap_children_on_err
367
+ def spawn_testproc(cmd=None, **kwds):
368
+ """Create a python subprocess which does nothing for some secs and
369
+ return it as a subprocess.Popen instance.
370
+ If "cmd" is specified that is used instead of python.
371
+ By default stdin and stdout are redirected to /dev/null.
372
+ It also attempts to make sure the process is in a reasonably
373
+ initialized state.
374
+ The process is registered for cleanup on reap_children().
375
+ """
376
+ kwds.setdefault("stdin", DEVNULL)
377
+ kwds.setdefault("stdout", DEVNULL)
378
+ kwds.setdefault("cwd", os.getcwd())
379
+ kwds.setdefault("env", PYTHON_EXE_ENV)
380
+ if WINDOWS:
381
+ # Prevents the subprocess to open error dialogs. This will also
382
+ # cause stderr to be suppressed, which is suboptimal in order
383
+ # to debug broken tests.
384
+ CREATE_NO_WINDOW = 0x8000000
385
+ kwds.setdefault("creationflags", CREATE_NO_WINDOW)
386
+ if cmd is None:
387
+ testfn = get_testfn(dir=os.getcwd())
388
+ try:
389
+ safe_rmpath(testfn)
390
+ pyline = (
391
+ "import time;"
392
+ + "open(r'%s', 'w').close();" % testfn
393
+ + "[time.sleep(0.1) for x in range(100)];" # 10 secs
394
+ )
395
+ cmd = [PYTHON_EXE, "-c", pyline]
396
+ sproc = subprocess.Popen(cmd, **kwds)
397
+ _subprocesses_started.add(sproc)
398
+ wait_for_file(testfn, delete=True, empty=True)
399
+ finally:
400
+ safe_rmpath(testfn)
401
+ else:
402
+ sproc = subprocess.Popen(cmd, **kwds)
403
+ _subprocesses_started.add(sproc)
404
+ wait_for_pid(sproc.pid)
405
+ return sproc
406
+
407
+
408
+ @_reap_children_on_err
409
+ def spawn_children_pair():
410
+ """Create a subprocess which creates another one as in:
411
+ A (us) -> B (child) -> C (grandchild).
412
+ Return a (child, grandchild) tuple.
413
+ The 2 processes are fully initialized and will live for 60 secs
414
+ and are registered for cleanup on reap_children().
415
+ """
416
+ tfile = None
417
+ testfn = get_testfn(dir=os.getcwd())
418
+ try:
419
+ s = textwrap.dedent("""\
420
+ import subprocess, os, sys, time
421
+ s = "import os, time;"
422
+ s += "f = open('%s', 'w');"
423
+ s += "f.write(str(os.getpid()));"
424
+ s += "f.close();"
425
+ s += "[time.sleep(0.1) for x in range(100 * 6)];"
426
+ p = subprocess.Popen([r'%s', '-c', s])
427
+ p.wait()
428
+ """ % (os.path.basename(testfn), PYTHON_EXE))
429
+ # On Windows if we create a subprocess with CREATE_NO_WINDOW flag
430
+ # set (which is the default) a "conhost.exe" extra process will be
431
+ # spawned as a child. We don't want that.
432
+ if WINDOWS:
433
+ subp, tfile = pyrun(s, creationflags=0)
434
+ else:
435
+ subp, tfile = pyrun(s)
436
+ child = psutil.Process(subp.pid)
437
+ grandchild_pid = int(wait_for_file(testfn, delete=True, empty=False))
438
+ _pids_started.add(grandchild_pid)
439
+ grandchild = psutil.Process(grandchild_pid)
440
+ return (child, grandchild)
441
+ finally:
442
+ safe_rmpath(testfn)
443
+ if tfile is not None:
444
+ safe_rmpath(tfile)
445
+
446
+
447
+ def spawn_zombie():
448
+ """Create a zombie process and return a (parent, zombie) process tuple.
449
+ In order to kill the zombie parent must be terminate()d first, then
450
+ zombie must be wait()ed on.
451
+ """
452
+ assert psutil.POSIX
453
+ unix_file = get_testfn()
454
+ src = textwrap.dedent("""\
455
+ import os, sys, time, socket, contextlib
456
+ child_pid = os.fork()
457
+ if child_pid > 0:
458
+ time.sleep(3000)
459
+ else:
460
+ # this is the zombie process
461
+ s = socket.socket(socket.AF_UNIX)
462
+ with contextlib.closing(s):
463
+ s.connect('%s')
464
+ if sys.version_info < (3, ):
465
+ pid = str(os.getpid())
466
+ else:
467
+ pid = bytes(str(os.getpid()), 'ascii')
468
+ s.sendall(pid)
469
+ """ % unix_file)
470
+ tfile = None
471
+ sock = bind_unix_socket(unix_file)
472
+ try:
473
+ sock.settimeout(GLOBAL_TIMEOUT)
474
+ parent, tfile = pyrun(src)
475
+ conn, _ = sock.accept()
476
+ try:
477
+ select.select([conn.fileno()], [], [], GLOBAL_TIMEOUT)
478
+ zpid = int(conn.recv(1024))
479
+ _pids_started.add(zpid)
480
+ zombie = psutil.Process(zpid)
481
+ call_until(lambda: zombie.status() == psutil.STATUS_ZOMBIE)
482
+ return (parent, zombie)
483
+ finally:
484
+ conn.close()
485
+ finally:
486
+ sock.close()
487
+ safe_rmpath(unix_file)
488
+ if tfile is not None:
489
+ safe_rmpath(tfile)
490
+
491
+
492
+ @_reap_children_on_err
493
+ def pyrun(src, **kwds):
494
+ """Run python 'src' code string in a separate interpreter.
495
+ Returns a subprocess.Popen instance and the test file where the source
496
+ code was written.
497
+ """
498
+ kwds.setdefault("stdout", None)
499
+ kwds.setdefault("stderr", None)
500
+ srcfile = get_testfn()
501
+ try:
502
+ with open(srcfile, "w") as f:
503
+ f.write(src)
504
+ subp = spawn_testproc([PYTHON_EXE, f.name], **kwds)
505
+ wait_for_pid(subp.pid)
506
+ return (subp, srcfile)
507
+ except Exception:
508
+ safe_rmpath(srcfile)
509
+ raise
510
+
511
+
512
+ @_reap_children_on_err
513
+ def sh(cmd, **kwds):
514
+ """Run cmd in a subprocess and return its output.
515
+ raises RuntimeError on error.
516
+ """
517
+ # Prevents subprocess to open error dialogs in case of error.
518
+ flags = 0x8000000 if WINDOWS else 0
519
+ kwds.setdefault("stdout", subprocess.PIPE)
520
+ kwds.setdefault("stderr", subprocess.PIPE)
521
+ kwds.setdefault("universal_newlines", True)
522
+ kwds.setdefault("creationflags", flags)
523
+ if isinstance(cmd, str):
524
+ cmd = shlex.split(cmd)
525
+ p = subprocess.Popen(cmd, **kwds)
526
+ _subprocesses_started.add(p)
527
+ if PY3:
528
+ stdout, stderr = p.communicate(timeout=GLOBAL_TIMEOUT)
529
+ else:
530
+ stdout, stderr = p.communicate()
531
+ if p.returncode != 0:
532
+ raise RuntimeError(stdout + stderr)
533
+ if stderr:
534
+ warn(stderr)
535
+ if stdout.endswith('\n'):
536
+ stdout = stdout[:-1]
537
+ return stdout
538
+
539
+
540
+ def terminate(proc_or_pid, sig=signal.SIGTERM, wait_timeout=GLOBAL_TIMEOUT):
541
+ """Terminate a process and wait() for it.
542
+ Process can be a PID or an instance of psutil.Process(),
543
+ subprocess.Popen() or psutil.Popen().
544
+ If it's a subprocess.Popen() or psutil.Popen() instance also closes
545
+ its stdin / stdout / stderr fds.
546
+ PID is wait()ed even if the process is already gone (kills zombies).
547
+ Does nothing if the process does not exist.
548
+ Return process exit status.
549
+ """
550
+
551
+ def wait(proc, timeout):
552
+ if isinstance(proc, subprocess.Popen) and not PY3:
553
+ proc.wait()
554
+ else:
555
+ proc.wait(timeout)
556
+ if WINDOWS and isinstance(proc, subprocess.Popen):
557
+ # Otherwise PID may still hang around.
558
+ try:
559
+ return psutil.Process(proc.pid).wait(timeout)
560
+ except psutil.NoSuchProcess:
561
+ pass
562
+
563
+ def sendsig(proc, sig):
564
+ # XXX: otherwise the build hangs for some reason.
565
+ if MACOS and GITHUB_ACTIONS:
566
+ sig = signal.SIGKILL
567
+ # If the process received SIGSTOP, SIGCONT is necessary first,
568
+ # otherwise SIGTERM won't work.
569
+ if POSIX and sig != signal.SIGKILL:
570
+ proc.send_signal(signal.SIGCONT)
571
+ proc.send_signal(sig)
572
+
573
+ def term_subprocess_proc(proc, timeout):
574
+ try:
575
+ sendsig(proc, sig)
576
+ except OSError as err:
577
+ if WINDOWS and err.winerror == 6: # "invalid handle"
578
+ pass
579
+ elif err.errno != errno.ESRCH:
580
+ raise
581
+ return wait(proc, timeout)
582
+
583
+ def term_psutil_proc(proc, timeout):
584
+ try:
585
+ sendsig(proc, sig)
586
+ except psutil.NoSuchProcess:
587
+ pass
588
+ return wait(proc, timeout)
589
+
590
+ def term_pid(pid, timeout):
591
+ try:
592
+ proc = psutil.Process(pid)
593
+ except psutil.NoSuchProcess:
594
+ # Needed to kill zombies.
595
+ if POSIX:
596
+ return wait_pid(pid, timeout)
597
+ else:
598
+ return term_psutil_proc(proc, timeout)
599
+
600
+ def flush_popen(proc):
601
+ if proc.stdout:
602
+ proc.stdout.close()
603
+ if proc.stderr:
604
+ proc.stderr.close()
605
+ # Flushing a BufferedWriter may raise an error.
606
+ if proc.stdin:
607
+ proc.stdin.close()
608
+
609
+ p = proc_or_pid
610
+ try:
611
+ if isinstance(p, int):
612
+ return term_pid(p, wait_timeout)
613
+ elif isinstance(p, (psutil.Process, psutil.Popen)):
614
+ return term_psutil_proc(p, wait_timeout)
615
+ elif isinstance(p, subprocess.Popen):
616
+ return term_subprocess_proc(p, wait_timeout)
617
+ else:
618
+ raise TypeError("wrong type %r" % p)
619
+ finally:
620
+ if isinstance(p, (subprocess.Popen, psutil.Popen)):
621
+ flush_popen(p)
622
+ pid = p if isinstance(p, int) else p.pid
623
+ assert not psutil.pid_exists(pid), pid
624
+
625
+
626
+ def reap_children(recursive=False):
627
+ """Terminate and wait() any subprocess started by this test suite
628
+ and any children currently running, ensuring that no processes stick
629
+ around to hog resources.
630
+ If recursive is True it also tries to terminate and wait()
631
+ all grandchildren started by this process.
632
+ """
633
+ # Get the children here before terminating them, as in case of
634
+ # recursive=True we don't want to lose the intermediate reference
635
+ # pointing to the grandchildren.
636
+ children = psutil.Process().children(recursive=recursive)
637
+
638
+ # Terminate subprocess.Popen.
639
+ while _subprocesses_started:
640
+ subp = _subprocesses_started.pop()
641
+ terminate(subp)
642
+
643
+ # Collect started pids.
644
+ while _pids_started:
645
+ pid = _pids_started.pop()
646
+ terminate(pid)
647
+
648
+ # Terminate children.
649
+ if children:
650
+ for p in children:
651
+ terminate(p, wait_timeout=None)
652
+ _, alive = psutil.wait_procs(children, timeout=GLOBAL_TIMEOUT)
653
+ for p in alive:
654
+ warn("couldn't terminate process %r; attempting kill()" % p)
655
+ terminate(p, sig=signal.SIGKILL)
656
+
657
+
658
+ # ===================================================================
659
+ # --- OS
660
+ # ===================================================================
661
+
662
+
663
+ def kernel_version():
664
+ """Return a tuple such as (2, 6, 36)."""
665
+ if not POSIX:
666
+ raise NotImplementedError("not POSIX")
667
+ s = ""
668
+ uname = os.uname()[2]
669
+ for c in uname:
670
+ if c.isdigit() or c == '.':
671
+ s += c
672
+ else:
673
+ break
674
+ if not s:
675
+ raise ValueError("can't parse %r" % uname)
676
+ minor = 0
677
+ micro = 0
678
+ nums = s.split('.')
679
+ major = int(nums[0])
680
+ if len(nums) >= 2:
681
+ minor = int(nums[1])
682
+ if len(nums) >= 3:
683
+ micro = int(nums[2])
684
+ return (major, minor, micro)
685
+
686
+
687
+ def get_winver():
688
+ if not WINDOWS:
689
+ raise NotImplementedError("not WINDOWS")
690
+ wv = sys.getwindowsversion()
691
+ if hasattr(wv, 'service_pack_major'): # python >= 2.7
692
+ sp = wv.service_pack_major or 0
693
+ else:
694
+ r = re.search(r"\s\d$", wv[4])
695
+ sp = int(r.group(0)) if r else 0
696
+ return (wv[0], wv[1], sp)
697
+
698
+
699
+ # ===================================================================
700
+ # --- sync primitives
701
+ # ===================================================================
702
+
703
+
704
+ class retry:
705
+ """A retry decorator."""
706
+
707
+ def __init__(
708
+ self,
709
+ exception=Exception,
710
+ timeout=None,
711
+ retries=None,
712
+ interval=0.001,
713
+ logfun=None,
714
+ ):
715
+ if timeout and retries:
716
+ raise ValueError("timeout and retries args are mutually exclusive")
717
+ self.exception = exception
718
+ self.timeout = timeout
719
+ self.retries = retries
720
+ self.interval = interval
721
+ self.logfun = logfun
722
+
723
+ def __iter__(self):
724
+ if self.timeout:
725
+ stop_at = time.time() + self.timeout
726
+ while time.time() < stop_at:
727
+ yield
728
+ elif self.retries:
729
+ for _ in range(self.retries):
730
+ yield
731
+ else:
732
+ while True:
733
+ yield
734
+
735
+ def sleep(self):
736
+ if self.interval is not None:
737
+ time.sleep(self.interval)
738
+
739
+ def __call__(self, fun):
740
+ @functools.wraps(fun)
741
+ def wrapper(*args, **kwargs):
742
+ exc = None
743
+ for _ in self:
744
+ try:
745
+ return fun(*args, **kwargs)
746
+ except self.exception as _: # NOQA
747
+ exc = _
748
+ if self.logfun is not None:
749
+ self.logfun(exc)
750
+ self.sleep()
751
+ continue
752
+ if PY3:
753
+ raise exc # noqa: PLE0704
754
+ else:
755
+ raise # noqa: PLE0704
756
+
757
+ # This way the user of the decorated function can change config
758
+ # parameters.
759
+ wrapper.decorator = self
760
+ return wrapper
761
+
762
+
763
+ @retry(
764
+ exception=psutil.NoSuchProcess,
765
+ logfun=None,
766
+ timeout=GLOBAL_TIMEOUT,
767
+ interval=0.001,
768
+ )
769
+ def wait_for_pid(pid):
770
+ """Wait for pid to show up in the process list then return.
771
+ Used in the test suite to give time the sub process to initialize.
772
+ """
773
+ if pid not in psutil.pids():
774
+ raise psutil.NoSuchProcess(pid)
775
+ psutil.Process(pid)
776
+
777
+
778
+ @retry(
779
+ exception=(FileNotFoundError, AssertionError),
780
+ logfun=None,
781
+ timeout=GLOBAL_TIMEOUT,
782
+ interval=0.001,
783
+ )
784
+ def wait_for_file(fname, delete=True, empty=False):
785
+ """Wait for a file to be written on disk with some content."""
786
+ with open(fname, "rb") as f:
787
+ data = f.read()
788
+ if not empty:
789
+ assert data
790
+ if delete:
791
+ safe_rmpath(fname)
792
+ return data
793
+
794
+
795
+ @retry(
796
+ exception=AssertionError,
797
+ logfun=None,
798
+ timeout=GLOBAL_TIMEOUT,
799
+ interval=0.001,
800
+ )
801
+ def call_until(fun):
802
+ """Keep calling function until it evaluates to True."""
803
+ ret = fun()
804
+ assert ret
805
+ return ret
806
+
807
+
808
+ # ===================================================================
809
+ # --- fs
810
+ # ===================================================================
811
+
812
+
813
+ def safe_rmpath(path):
814
+ """Convenience function for removing temporary test files or dirs."""
815
+
816
+ def retry_fun(fun):
817
+ # On Windows it could happen that the file or directory has
818
+ # open handles or references preventing the delete operation
819
+ # to succeed immediately, so we retry for a while. See:
820
+ # https://bugs.python.org/issue33240
821
+ stop_at = time.time() + GLOBAL_TIMEOUT
822
+ while time.time() < stop_at:
823
+ try:
824
+ return fun()
825
+ except FileNotFoundError:
826
+ pass
827
+ except WindowsError as _:
828
+ err = _
829
+ warn("ignoring %s" % (str(err)))
830
+ time.sleep(0.01)
831
+ raise err
832
+
833
+ try:
834
+ st = os.stat(path)
835
+ if stat.S_ISDIR(st.st_mode):
836
+ fun = functools.partial(shutil.rmtree, path)
837
+ else:
838
+ fun = functools.partial(os.remove, path)
839
+ if POSIX:
840
+ fun()
841
+ else:
842
+ retry_fun(fun)
843
+ except FileNotFoundError:
844
+ pass
845
+
846
+
847
+ def safe_mkdir(dir):
848
+ """Convenience function for creating a directory."""
849
+ try:
850
+ os.mkdir(dir)
851
+ except FileExistsError:
852
+ pass
853
+
854
+
855
+ @contextlib.contextmanager
856
+ def chdir(dirname):
857
+ """Context manager which temporarily changes the current directory."""
858
+ curdir = os.getcwd()
859
+ try:
860
+ os.chdir(dirname)
861
+ yield
862
+ finally:
863
+ os.chdir(curdir)
864
+
865
+
866
+ def create_py_exe(path):
867
+ """Create a Python executable file in the given location."""
868
+ assert not os.path.exists(path), path
869
+ atexit.register(safe_rmpath, path)
870
+ shutil.copyfile(PYTHON_EXE, path)
871
+ if POSIX:
872
+ st = os.stat(path)
873
+ os.chmod(path, st.st_mode | stat.S_IEXEC)
874
+ return path
875
+
876
+
877
+ def create_c_exe(path, c_code=None):
878
+ """Create a compiled C executable in the given location."""
879
+ assert not os.path.exists(path), path
880
+ if not which("gcc"):
881
+ raise pytest.skip("gcc is not installed")
882
+ if c_code is None:
883
+ c_code = textwrap.dedent("""
884
+ #include <unistd.h>
885
+ int main() {
886
+ pause();
887
+ return 1;
888
+ }
889
+ """)
890
+ else:
891
+ assert isinstance(c_code, str), c_code
892
+
893
+ atexit.register(safe_rmpath, path)
894
+ with open(get_testfn(suffix='.c'), "w") as f:
895
+ f.write(c_code)
896
+ try:
897
+ subprocess.check_call(["gcc", f.name, "-o", path])
898
+ finally:
899
+ safe_rmpath(f.name)
900
+ return path
901
+
902
+
903
+ def get_testfn(suffix="", dir=None):
904
+ """Return an absolute pathname of a file or dir that did not
905
+ exist at the time this call is made. Also schedule it for safe
906
+ deletion at interpreter exit. It's technically racy but probably
907
+ not really due to the time variant.
908
+ """
909
+ while True:
910
+ name = tempfile.mktemp(prefix=TESTFN_PREFIX, suffix=suffix, dir=dir)
911
+ if not os.path.exists(name): # also include dirs
912
+ path = os.path.realpath(name) # needed for OSX
913
+ atexit.register(safe_rmpath, path)
914
+ return path
915
+
916
+
917
+ # ===================================================================
918
+ # --- testing
919
+ # ===================================================================
920
+
921
+
922
+ class fake_pytest:
923
+ """A class that mimics some basic pytest APIs. This is meant for
924
+ when unit tests are run in production, where pytest may not be
925
+ installed. Still, the user can test psutil installation via:
926
+
927
+ $ python3 -m psutil.tests
928
+ """
929
+
930
+ @staticmethod
931
+ def main(*args, **kw): # noqa ARG004
932
+ """Mimics pytest.main(). It has the same effect as running
933
+ `python3 -m unittest -v` from the project root directory.
934
+ """
935
+ suite = unittest.TestLoader().discover(HERE)
936
+ unittest.TextTestRunner(verbosity=2).run(suite)
937
+ warnings.warn(
938
+ "Fake pytest module was used. Test results may be inaccurate.",
939
+ UserWarning,
940
+ stacklevel=1,
941
+ )
942
+ return suite
943
+
944
+ @staticmethod
945
+ def raises(exc, match=None):
946
+ """Mimics `pytest.raises`."""
947
+
948
+ class ExceptionInfo:
949
+ _exc = None
950
+
951
+ @property
952
+ def value(self):
953
+ return self._exc
954
+
955
+ @contextlib.contextmanager
956
+ def context(exc, match=None):
957
+ einfo = ExceptionInfo()
958
+ try:
959
+ yield einfo
960
+ except exc as err:
961
+ if match and not re.search(match, str(err)):
962
+ msg = '"{}" does not match "{}"'.format(match, str(err))
963
+ raise AssertionError(msg)
964
+ einfo._exc = err
965
+ else:
966
+ raise AssertionError("%r not raised" % exc)
967
+
968
+ return context(exc, match=match)
969
+
970
+ @staticmethod
971
+ def warns(warning, match=None):
972
+ """Mimics `pytest.warns`."""
973
+ if match:
974
+ return unittest.TestCase().assertWarnsRegex(warning, match)
975
+ return unittest.TestCase().assertWarns(warning)
976
+
977
+ @staticmethod
978
+ def skip(reason=""):
979
+ """Mimics `unittest.SkipTest`."""
980
+ raise unittest.SkipTest(reason)
981
+
982
+ class mark:
983
+
984
+ @staticmethod
985
+ def skipif(condition, reason=""):
986
+ """Mimics `@pytest.mark.skipif` decorator."""
987
+ return unittest.skipIf(condition, reason)
988
+
989
+ class xdist_group:
990
+ """Mimics `@pytest.mark.xdist_group` decorator (no-op)."""
991
+
992
+ def __init__(self, name=None):
993
+ pass
994
+
995
+ def __call__(self, cls_or_meth):
996
+ return cls_or_meth
997
+
998
+
999
+ if pytest is None:
1000
+ pytest = fake_pytest
1001
+
1002
+
1003
+ class TestCase(unittest.TestCase):
1004
+ # ...otherwise multiprocessing.Pool complains
1005
+ if not PY3:
1006
+
1007
+ def runTest(self):
1008
+ pass
1009
+
1010
+ @contextlib.contextmanager
1011
+ def subTest(self, *args, **kw):
1012
+ # fake it for python 2.7
1013
+ yield
1014
+
1015
+
1016
+ # monkey patch default unittest.TestCase
1017
+ unittest.TestCase = TestCase
1018
+
1019
+
1020
+ class PsutilTestCase(TestCase):
1021
+ """Test class providing auto-cleanup wrappers on top of process
1022
+ test utilities. All test classes should derive from this one, even
1023
+ if we use pytest.
1024
+ """
1025
+
1026
+ def get_testfn(self, suffix="", dir=None):
1027
+ fname = get_testfn(suffix=suffix, dir=dir)
1028
+ self.addCleanup(safe_rmpath, fname)
1029
+ return fname
1030
+
1031
+ def spawn_testproc(self, *args, **kwds):
1032
+ sproc = spawn_testproc(*args, **kwds)
1033
+ self.addCleanup(terminate, sproc)
1034
+ return sproc
1035
+
1036
+ def spawn_children_pair(self):
1037
+ child1, child2 = spawn_children_pair()
1038
+ self.addCleanup(terminate, child2)
1039
+ self.addCleanup(terminate, child1) # executed first
1040
+ return (child1, child2)
1041
+
1042
+ def spawn_zombie(self):
1043
+ parent, zombie = spawn_zombie()
1044
+ self.addCleanup(terminate, zombie)
1045
+ self.addCleanup(terminate, parent) # executed first
1046
+ return (parent, zombie)
1047
+
1048
+ def pyrun(self, *args, **kwds):
1049
+ sproc, srcfile = pyrun(*args, **kwds)
1050
+ self.addCleanup(safe_rmpath, srcfile)
1051
+ self.addCleanup(terminate, sproc) # executed first
1052
+ return sproc
1053
+
1054
+ def _check_proc_exc(self, proc, exc):
1055
+ assert isinstance(exc, psutil.Error)
1056
+ assert exc.pid == proc.pid
1057
+ assert exc.name == proc._name
1058
+ if exc.name:
1059
+ assert exc.name
1060
+ if isinstance(exc, psutil.ZombieProcess):
1061
+ assert exc.ppid == proc._ppid
1062
+ if exc.ppid is not None:
1063
+ assert exc.ppid >= 0
1064
+ str(exc)
1065
+ repr(exc)
1066
+
1067
+ def assertPidGone(self, pid):
1068
+ with pytest.raises(psutil.NoSuchProcess) as cm:
1069
+ try:
1070
+ psutil.Process(pid)
1071
+ except psutil.ZombieProcess:
1072
+ raise AssertionError("wasn't supposed to raise ZombieProcess")
1073
+ assert cm.value.pid == pid
1074
+ assert cm.value.name is None
1075
+ assert not psutil.pid_exists(pid), pid
1076
+ assert pid not in psutil.pids()
1077
+ assert pid not in [x.pid for x in psutil.process_iter()]
1078
+
1079
+ def assertProcessGone(self, proc):
1080
+ self.assertPidGone(proc.pid)
1081
+ ns = process_namespace(proc)
1082
+ for fun, name in ns.iter(ns.all, clear_cache=True):
1083
+ with self.subTest(proc=proc, name=name):
1084
+ try:
1085
+ ret = fun()
1086
+ except psutil.ZombieProcess:
1087
+ raise
1088
+ except psutil.NoSuchProcess as exc:
1089
+ self._check_proc_exc(proc, exc)
1090
+ else:
1091
+ msg = "Process.%s() didn't raise NSP and returned %r" % (
1092
+ name,
1093
+ ret,
1094
+ )
1095
+ raise AssertionError(msg)
1096
+ proc.wait(timeout=0) # assert not raise TimeoutExpired
1097
+
1098
+ def assertProcessZombie(self, proc):
1099
+ # A zombie process should always be instantiable.
1100
+ clone = psutil.Process(proc.pid)
1101
+ # Cloned zombie on Open/NetBSD has null creation time, see:
1102
+ # https://github.com/giampaolo/psutil/issues/2287
1103
+ assert proc == clone
1104
+ if not (OPENBSD or NETBSD):
1105
+ assert hash(proc) == hash(clone)
1106
+ # Its status always be querable.
1107
+ assert proc.status() == psutil.STATUS_ZOMBIE
1108
+ # It should be considered 'running'.
1109
+ assert proc.is_running()
1110
+ assert psutil.pid_exists(proc.pid)
1111
+ # as_dict() shouldn't crash.
1112
+ proc.as_dict()
1113
+ # It should show up in pids() and process_iter().
1114
+ assert proc.pid in psutil.pids()
1115
+ assert proc.pid in [x.pid for x in psutil.process_iter()]
1116
+ psutil._pmap = {}
1117
+ assert proc.pid in [x.pid for x in psutil.process_iter()]
1118
+ # Call all methods.
1119
+ ns = process_namespace(proc)
1120
+ for fun, name in ns.iter(ns.all, clear_cache=True):
1121
+ with self.subTest(proc=proc, name=name):
1122
+ try:
1123
+ fun()
1124
+ except (psutil.ZombieProcess, psutil.AccessDenied) as exc:
1125
+ self._check_proc_exc(proc, exc)
1126
+ if LINUX:
1127
+ # https://github.com/giampaolo/psutil/pull/2288
1128
+ with pytest.raises(psutil.ZombieProcess) as cm:
1129
+ proc.cmdline()
1130
+ self._check_proc_exc(proc, cm.value)
1131
+ with pytest.raises(psutil.ZombieProcess) as cm:
1132
+ proc.exe()
1133
+ self._check_proc_exc(proc, cm.value)
1134
+ with pytest.raises(psutil.ZombieProcess) as cm:
1135
+ proc.memory_maps()
1136
+ self._check_proc_exc(proc, cm.value)
1137
+ # Zombie cannot be signaled or terminated.
1138
+ proc.suspend()
1139
+ proc.resume()
1140
+ proc.terminate()
1141
+ proc.kill()
1142
+ assert proc.is_running()
1143
+ assert psutil.pid_exists(proc.pid)
1144
+ assert proc.pid in psutil.pids()
1145
+ assert proc.pid in [x.pid for x in psutil.process_iter()]
1146
+ psutil._pmap = {}
1147
+ assert proc.pid in [x.pid for x in psutil.process_iter()]
1148
+
1149
+ # Its parent should 'see' it (edit: not true on BSD and MACOS).
1150
+ # descendants = [x.pid for x in psutil.Process().children(
1151
+ # recursive=True)]
1152
+ # self.assertIn(proc.pid, descendants)
1153
+
1154
+ # __eq__ can't be relied upon because creation time may not be
1155
+ # querable.
1156
+ # self.assertEqual(proc, psutil.Process(proc.pid))
1157
+
1158
+ # XXX should we also assume ppid() to be usable? Note: this
1159
+ # would be an important use case as the only way to get
1160
+ # rid of a zombie is to kill its parent.
1161
+ # self.assertEqual(proc.ppid(), os.getpid())
1162
+
1163
+
1164
+ @pytest.mark.skipif(PYPY, reason="unreliable on PYPY")
1165
+ class TestMemoryLeak(PsutilTestCase):
1166
+ """Test framework class for detecting function memory leaks,
1167
+ typically functions implemented in C which forgot to free() memory
1168
+ from the heap. It does so by checking whether the process memory
1169
+ usage increased before and after calling the function many times.
1170
+
1171
+ Note that this is hard (probably impossible) to do reliably, due
1172
+ to how the OS handles memory, the GC and so on (memory can even
1173
+ decrease!). In order to avoid false positives, in case of failure
1174
+ (mem > 0) we retry the test for up to 5 times, increasing call
1175
+ repetitions each time. If the memory keeps increasing then it's a
1176
+ failure.
1177
+
1178
+ If available (Linux, OSX, Windows), USS memory is used for comparison,
1179
+ since it's supposed to be more precise, see:
1180
+ https://gmpy.dev/blog/2016/real-process-memory-and-environ-in-python
1181
+ If not, RSS memory is used. mallinfo() on Linux and _heapwalk() on
1182
+ Windows may give even more precision, but at the moment are not
1183
+ implemented.
1184
+
1185
+ PyPy appears to be completely unstable for this framework, probably
1186
+ because of its JIT, so tests on PYPY are skipped.
1187
+
1188
+ Usage:
1189
+
1190
+ class TestLeaks(psutil.tests.TestMemoryLeak):
1191
+
1192
+ def test_fun(self):
1193
+ self.execute(some_function)
1194
+ """
1195
+
1196
+ # Configurable class attrs.
1197
+ times = 200
1198
+ warmup_times = 10
1199
+ tolerance = 0 # memory
1200
+ retries = 10 if CI_TESTING else 5
1201
+ verbose = True
1202
+ _thisproc = psutil.Process()
1203
+ _psutil_debug_orig = bool(os.getenv('PSUTIL_DEBUG'))
1204
+
1205
+ @classmethod
1206
+ def setUpClass(cls):
1207
+ psutil._set_debug(False) # avoid spamming to stderr
1208
+
1209
+ @classmethod
1210
+ def tearDownClass(cls):
1211
+ psutil._set_debug(cls._psutil_debug_orig)
1212
+
1213
+ def _get_mem(self):
1214
+ # USS is the closest thing we have to "real" memory usage and it
1215
+ # should be less likely to produce false positives.
1216
+ mem = self._thisproc.memory_full_info()
1217
+ return getattr(mem, "uss", mem.rss)
1218
+
1219
+ def _get_num_fds(self):
1220
+ if POSIX:
1221
+ return self._thisproc.num_fds()
1222
+ else:
1223
+ return self._thisproc.num_handles()
1224
+
1225
+ def _log(self, msg):
1226
+ if self.verbose:
1227
+ print_color(msg, color="yellow", file=sys.stderr)
1228
+
1229
+ def _check_fds(self, fun):
1230
+ """Makes sure num_fds() (POSIX) or num_handles() (Windows) does
1231
+ not increase after calling a function. Used to discover forgotten
1232
+ close(2) and CloseHandle syscalls.
1233
+ """
1234
+ before = self._get_num_fds()
1235
+ self.call(fun)
1236
+ after = self._get_num_fds()
1237
+ diff = after - before
1238
+ if diff < 0:
1239
+ raise self.fail(
1240
+ "negative diff %r (gc probably collected a "
1241
+ "resource from a previous test)" % diff
1242
+ )
1243
+ if diff > 0:
1244
+ type_ = "fd" if POSIX else "handle"
1245
+ if diff > 1:
1246
+ type_ += "s"
1247
+ msg = "%s unclosed %s after calling %r" % (diff, type_, fun)
1248
+ raise self.fail(msg)
1249
+
1250
+ def _call_ntimes(self, fun, times):
1251
+ """Get 2 distinct memory samples, before and after having
1252
+ called fun repeatedly, and return the memory difference.
1253
+ """
1254
+ gc.collect(generation=1)
1255
+ mem1 = self._get_mem()
1256
+ for x in range(times):
1257
+ ret = self.call(fun)
1258
+ del x, ret
1259
+ gc.collect(generation=1)
1260
+ mem2 = self._get_mem()
1261
+ assert gc.garbage == []
1262
+ diff = mem2 - mem1 # can also be negative
1263
+ return diff
1264
+
1265
+ def _check_mem(self, fun, times, retries, tolerance):
1266
+ messages = []
1267
+ prev_mem = 0
1268
+ increase = times
1269
+ for idx in range(1, retries + 1):
1270
+ mem = self._call_ntimes(fun, times)
1271
+ msg = "Run #%s: extra-mem=%s, per-call=%s, calls=%s" % (
1272
+ idx,
1273
+ bytes2human(mem),
1274
+ bytes2human(mem / times),
1275
+ times,
1276
+ )
1277
+ messages.append(msg)
1278
+ success = mem <= tolerance or mem <= prev_mem
1279
+ if success:
1280
+ if idx > 1:
1281
+ self._log(msg)
1282
+ return
1283
+ else:
1284
+ if idx == 1:
1285
+ print() # NOQA
1286
+ self._log(msg)
1287
+ times += increase
1288
+ prev_mem = mem
1289
+ raise self.fail(". ".join(messages))
1290
+
1291
+ # ---
1292
+
1293
+ def call(self, fun):
1294
+ return fun()
1295
+
1296
+ def execute(
1297
+ self, fun, times=None, warmup_times=None, retries=None, tolerance=None
1298
+ ):
1299
+ """Test a callable."""
1300
+ times = times if times is not None else self.times
1301
+ warmup_times = (
1302
+ warmup_times if warmup_times is not None else self.warmup_times
1303
+ )
1304
+ retries = retries if retries is not None else self.retries
1305
+ tolerance = tolerance if tolerance is not None else self.tolerance
1306
+ try:
1307
+ assert times >= 1, "times must be >= 1"
1308
+ assert warmup_times >= 0, "warmup_times must be >= 0"
1309
+ assert retries >= 0, "retries must be >= 0"
1310
+ assert tolerance >= 0, "tolerance must be >= 0"
1311
+ except AssertionError as err:
1312
+ raise ValueError(str(err))
1313
+
1314
+ self._call_ntimes(fun, warmup_times) # warm up
1315
+ self._check_fds(fun)
1316
+ self._check_mem(fun, times=times, retries=retries, tolerance=tolerance)
1317
+
1318
+ def execute_w_exc(self, exc, fun, **kwargs):
1319
+ """Convenience method to test a callable while making sure it
1320
+ raises an exception on every call.
1321
+ """
1322
+
1323
+ def call():
1324
+ self.assertRaises(exc, fun)
1325
+
1326
+ self.execute(call, **kwargs)
1327
+
1328
+
1329
+ def print_sysinfo():
1330
+ import collections
1331
+ import datetime
1332
+ import getpass
1333
+ import locale
1334
+ import pprint
1335
+
1336
+ try:
1337
+ import pip
1338
+ except ImportError:
1339
+ pip = None
1340
+ try:
1341
+ import wheel
1342
+ except ImportError:
1343
+ wheel = None
1344
+
1345
+ info = collections.OrderedDict()
1346
+
1347
+ # OS
1348
+ if psutil.LINUX and which('lsb_release'):
1349
+ info['OS'] = sh('lsb_release -d -s')
1350
+ elif psutil.OSX:
1351
+ info['OS'] = 'Darwin %s' % platform.mac_ver()[0]
1352
+ elif psutil.WINDOWS:
1353
+ info['OS'] = "Windows " + ' '.join(map(str, platform.win32_ver()))
1354
+ if hasattr(platform, 'win32_edition'):
1355
+ info['OS'] += ", " + platform.win32_edition()
1356
+ else:
1357
+ info['OS'] = "%s %s" % (platform.system(), platform.version())
1358
+ info['arch'] = ', '.join(
1359
+ list(platform.architecture()) + [platform.machine()]
1360
+ )
1361
+ if psutil.POSIX:
1362
+ info['kernel'] = platform.uname()[2]
1363
+
1364
+ # python
1365
+ info['python'] = ', '.join([
1366
+ platform.python_implementation(),
1367
+ platform.python_version(),
1368
+ platform.python_compiler(),
1369
+ ])
1370
+ info['pip'] = getattr(pip, '__version__', 'not installed')
1371
+ if wheel is not None:
1372
+ info['pip'] += " (wheel=%s)" % wheel.__version__
1373
+
1374
+ # UNIX
1375
+ if psutil.POSIX:
1376
+ if which('gcc'):
1377
+ out = sh(['gcc', '--version'])
1378
+ info['gcc'] = str(out).split('\n')[0]
1379
+ else:
1380
+ info['gcc'] = 'not installed'
1381
+ s = platform.libc_ver()[1]
1382
+ if s:
1383
+ info['glibc'] = s
1384
+
1385
+ # system
1386
+ info['fs-encoding'] = sys.getfilesystemencoding()
1387
+ lang = locale.getlocale()
1388
+ info['lang'] = '%s, %s' % (lang[0], lang[1])
1389
+ info['boot-time'] = datetime.datetime.fromtimestamp(
1390
+ psutil.boot_time()
1391
+ ).strftime("%Y-%m-%d %H:%M:%S")
1392
+ info['time'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
1393
+ info['user'] = getpass.getuser()
1394
+ info['home'] = os.path.expanduser("~")
1395
+ info['cwd'] = os.getcwd()
1396
+ info['pyexe'] = PYTHON_EXE
1397
+ info['hostname'] = platform.node()
1398
+ info['PID'] = os.getpid()
1399
+
1400
+ # metrics
1401
+ info['cpus'] = psutil.cpu_count()
1402
+ info['loadavg'] = "%.1f%%, %.1f%%, %.1f%%" % (
1403
+ tuple([x / psutil.cpu_count() * 100 for x in psutil.getloadavg()])
1404
+ )
1405
+ mem = psutil.virtual_memory()
1406
+ info['memory'] = "%s%%, used=%s, total=%s" % (
1407
+ int(mem.percent),
1408
+ bytes2human(mem.used),
1409
+ bytes2human(mem.total),
1410
+ )
1411
+ swap = psutil.swap_memory()
1412
+ info['swap'] = "%s%%, used=%s, total=%s" % (
1413
+ int(swap.percent),
1414
+ bytes2human(swap.used),
1415
+ bytes2human(swap.total),
1416
+ )
1417
+ info['pids'] = len(psutil.pids())
1418
+ pinfo = psutil.Process().as_dict()
1419
+ pinfo.pop('memory_maps', None)
1420
+ info['proc'] = pprint.pformat(pinfo)
1421
+
1422
+ print("=" * 70, file=sys.stderr) # NOQA
1423
+ for k, v in info.items():
1424
+ print("%-17s %s" % (k + ':', v), file=sys.stderr) # NOQA
1425
+ print("=" * 70, file=sys.stderr) # NOQA
1426
+ sys.stdout.flush()
1427
+
1428
+ # if WINDOWS:
1429
+ # os.system("tasklist")
1430
+ # elif which("ps"):
1431
+ # os.system("ps aux")
1432
+ # print("=" * 70, file=sys.stderr) # NOQA
1433
+
1434
+ sys.stdout.flush()
1435
+
1436
+
1437
+ def is_win_secure_system_proc(pid):
1438
+ # see: https://github.com/giampaolo/psutil/issues/2338
1439
+ @memoize
1440
+ def get_procs():
1441
+ ret = {}
1442
+ out = sh("tasklist.exe /NH /FO csv")
1443
+ for line in out.splitlines()[1:]:
1444
+ bits = [x.replace('"', "") for x in line.split(",")]
1445
+ name, pid = bits[0], int(bits[1])
1446
+ ret[pid] = name
1447
+ return ret
1448
+
1449
+ try:
1450
+ return get_procs()[pid] == "Secure System"
1451
+ except KeyError:
1452
+ return False
1453
+
1454
+
1455
+ def _get_eligible_cpu():
1456
+ p = psutil.Process()
1457
+ if hasattr(p, "cpu_num"):
1458
+ return p.cpu_num()
1459
+ elif hasattr(p, "cpu_affinity"):
1460
+ return random.choice(p.cpu_affinity())
1461
+ return 0
1462
+
1463
+
1464
+ class process_namespace:
1465
+ """A container that lists all Process class method names + some
1466
+ reasonable parameters to be called with. Utility methods (parent(),
1467
+ children(), ...) are excluded.
1468
+
1469
+ >>> ns = process_namespace(psutil.Process())
1470
+ >>> for fun, name in ns.iter(ns.getters):
1471
+ ... fun()
1472
+ """
1473
+
1474
+ utils = [('cpu_percent', (), {}), ('memory_percent', (), {})]
1475
+
1476
+ ignored = [
1477
+ ('as_dict', (), {}),
1478
+ ('children', (), {'recursive': True}),
1479
+ ('connections', (), {}), # deprecated
1480
+ ('is_running', (), {}),
1481
+ ('memory_info_ex', (), {}), # deprecated
1482
+ ('oneshot', (), {}),
1483
+ ('parent', (), {}),
1484
+ ('parents', (), {}),
1485
+ ('pid', (), {}),
1486
+ ('wait', (0,), {}),
1487
+ ]
1488
+
1489
+ getters = [
1490
+ ('cmdline', (), {}),
1491
+ ('cpu_times', (), {}),
1492
+ ('create_time', (), {}),
1493
+ ('cwd', (), {}),
1494
+ ('exe', (), {}),
1495
+ ('memory_full_info', (), {}),
1496
+ ('memory_info', (), {}),
1497
+ ('name', (), {}),
1498
+ ('net_connections', (), {'kind': 'all'}),
1499
+ ('nice', (), {}),
1500
+ ('num_ctx_switches', (), {}),
1501
+ ('num_threads', (), {}),
1502
+ ('open_files', (), {}),
1503
+ ('ppid', (), {}),
1504
+ ('status', (), {}),
1505
+ ('threads', (), {}),
1506
+ ('username', (), {}),
1507
+ ]
1508
+ if POSIX:
1509
+ getters += [('uids', (), {})]
1510
+ getters += [('gids', (), {})]
1511
+ getters += [('terminal', (), {})]
1512
+ getters += [('num_fds', (), {})]
1513
+ if HAS_PROC_IO_COUNTERS:
1514
+ getters += [('io_counters', (), {})]
1515
+ if HAS_IONICE:
1516
+ getters += [('ionice', (), {})]
1517
+ if HAS_RLIMIT:
1518
+ getters += [('rlimit', (psutil.RLIMIT_NOFILE,), {})]
1519
+ if HAS_CPU_AFFINITY:
1520
+ getters += [('cpu_affinity', (), {})]
1521
+ if HAS_PROC_CPU_NUM:
1522
+ getters += [('cpu_num', (), {})]
1523
+ if HAS_ENVIRON:
1524
+ getters += [('environ', (), {})]
1525
+ if WINDOWS:
1526
+ getters += [('num_handles', (), {})]
1527
+ if HAS_MEMORY_MAPS:
1528
+ getters += [('memory_maps', (), {'grouped': False})]
1529
+
1530
+ setters = []
1531
+ if POSIX:
1532
+ setters += [('nice', (0,), {})]
1533
+ else:
1534
+ setters += [('nice', (psutil.NORMAL_PRIORITY_CLASS,), {})]
1535
+ if HAS_RLIMIT:
1536
+ setters += [('rlimit', (psutil.RLIMIT_NOFILE, (1024, 4096)), {})]
1537
+ if HAS_IONICE:
1538
+ if LINUX:
1539
+ setters += [('ionice', (psutil.IOPRIO_CLASS_NONE, 0), {})]
1540
+ else:
1541
+ setters += [('ionice', (psutil.IOPRIO_NORMAL,), {})]
1542
+ if HAS_CPU_AFFINITY:
1543
+ setters += [('cpu_affinity', ([_get_eligible_cpu()],), {})]
1544
+
1545
+ killers = [
1546
+ ('send_signal', (signal.SIGTERM,), {}),
1547
+ ('suspend', (), {}),
1548
+ ('resume', (), {}),
1549
+ ('terminate', (), {}),
1550
+ ('kill', (), {}),
1551
+ ]
1552
+ if WINDOWS:
1553
+ killers += [('send_signal', (signal.CTRL_C_EVENT,), {})]
1554
+ killers += [('send_signal', (signal.CTRL_BREAK_EVENT,), {})]
1555
+
1556
+ all = utils + getters + setters + killers
1557
+
1558
+ def __init__(self, proc):
1559
+ self._proc = proc
1560
+
1561
+ def iter(self, ls, clear_cache=True):
1562
+ """Given a list of tuples yields a set of (fun, fun_name) tuples
1563
+ in random order.
1564
+ """
1565
+ ls = list(ls)
1566
+ random.shuffle(ls)
1567
+ for fun_name, args, kwds in ls:
1568
+ if clear_cache:
1569
+ self.clear_cache()
1570
+ fun = getattr(self._proc, fun_name)
1571
+ fun = functools.partial(fun, *args, **kwds)
1572
+ yield (fun, fun_name)
1573
+
1574
+ def clear_cache(self):
1575
+ """Clear the cache of a Process instance."""
1576
+ self._proc._init(self._proc.pid, _ignore_nsp=True)
1577
+
1578
+ @classmethod
1579
+ def test_class_coverage(cls, test_class, ls):
1580
+ """Given a TestCase instance and a list of tuples checks that
1581
+ the class defines the required test method names.
1582
+ """
1583
+ for fun_name, _, _ in ls:
1584
+ meth_name = 'test_' + fun_name
1585
+ if not hasattr(test_class, meth_name):
1586
+ msg = "%r class should define a '%s' method" % (
1587
+ test_class.__class__.__name__,
1588
+ meth_name,
1589
+ )
1590
+ raise AttributeError(msg)
1591
+
1592
+ @classmethod
1593
+ def test(cls):
1594
+ this = set([x[0] for x in cls.all])
1595
+ ignored = set([x[0] for x in cls.ignored])
1596
+ klass = set([x for x in dir(psutil.Process) if x[0] != '_'])
1597
+ leftout = (this | ignored) ^ klass
1598
+ if leftout:
1599
+ raise ValueError("uncovered Process class names: %r" % leftout)
1600
+
1601
+
1602
+ class system_namespace:
1603
+ """A container that lists all the module-level, system-related APIs.
1604
+ Utilities such as cpu_percent() are excluded. Usage:
1605
+
1606
+ >>> ns = system_namespace
1607
+ >>> for fun, name in ns.iter(ns.getters):
1608
+ ... fun()
1609
+ """
1610
+
1611
+ getters = [
1612
+ ('boot_time', (), {}),
1613
+ ('cpu_count', (), {'logical': False}),
1614
+ ('cpu_count', (), {'logical': True}),
1615
+ ('cpu_stats', (), {}),
1616
+ ('cpu_times', (), {'percpu': False}),
1617
+ ('cpu_times', (), {'percpu': True}),
1618
+ ('disk_io_counters', (), {'perdisk': True}),
1619
+ ('disk_partitions', (), {'all': True}),
1620
+ ('disk_usage', (os.getcwd(),), {}),
1621
+ ('net_connections', (), {'kind': 'all'}),
1622
+ ('net_if_addrs', (), {}),
1623
+ ('net_if_stats', (), {}),
1624
+ ('net_io_counters', (), {'pernic': True}),
1625
+ ('pid_exists', (os.getpid(),), {}),
1626
+ ('pids', (), {}),
1627
+ ('swap_memory', (), {}),
1628
+ ('users', (), {}),
1629
+ ('virtual_memory', (), {}),
1630
+ ]
1631
+ if HAS_CPU_FREQ:
1632
+ if MACOS and platform.machine() == 'arm64': # skipped due to #1892
1633
+ pass
1634
+ else:
1635
+ getters += [('cpu_freq', (), {'percpu': True})]
1636
+ if HAS_GETLOADAVG:
1637
+ getters += [('getloadavg', (), {})]
1638
+ if HAS_SENSORS_TEMPERATURES:
1639
+ getters += [('sensors_temperatures', (), {})]
1640
+ if HAS_SENSORS_FANS:
1641
+ getters += [('sensors_fans', (), {})]
1642
+ if HAS_SENSORS_BATTERY:
1643
+ getters += [('sensors_battery', (), {})]
1644
+ if WINDOWS:
1645
+ getters += [('win_service_iter', (), {})]
1646
+ getters += [('win_service_get', ('alg',), {})]
1647
+
1648
+ ignored = [
1649
+ ('process_iter', (), {}),
1650
+ ('wait_procs', ([psutil.Process()],), {}),
1651
+ ('cpu_percent', (), {}),
1652
+ ('cpu_times_percent', (), {}),
1653
+ ]
1654
+
1655
+ all = getters
1656
+
1657
+ @staticmethod
1658
+ def iter(ls):
1659
+ """Given a list of tuples yields a set of (fun, fun_name) tuples
1660
+ in random order.
1661
+ """
1662
+ ls = list(ls)
1663
+ random.shuffle(ls)
1664
+ for fun_name, args, kwds in ls:
1665
+ fun = getattr(psutil, fun_name)
1666
+ fun = functools.partial(fun, *args, **kwds)
1667
+ yield (fun, fun_name)
1668
+
1669
+ test_class_coverage = process_namespace.test_class_coverage
1670
+
1671
+
1672
+ def retry_on_failure(retries=NO_RETRIES):
1673
+ """Decorator which runs a test function and retries N times before
1674
+ actually failing.
1675
+ """
1676
+
1677
+ def logfun(exc):
1678
+ print("%r, retrying" % exc, file=sys.stderr) # NOQA
1679
+
1680
+ return retry(
1681
+ exception=AssertionError, timeout=None, retries=retries, logfun=logfun
1682
+ )
1683
+
1684
+
1685
+ def skip_on_access_denied(only_if=None):
1686
+ """Decorator to Ignore AccessDenied exceptions."""
1687
+
1688
+ def decorator(fun):
1689
+ @functools.wraps(fun)
1690
+ def wrapper(*args, **kwargs):
1691
+ try:
1692
+ return fun(*args, **kwargs)
1693
+ except psutil.AccessDenied:
1694
+ if only_if is not None:
1695
+ if not only_if:
1696
+ raise
1697
+ raise pytest.skip("raises AccessDenied")
1698
+
1699
+ return wrapper
1700
+
1701
+ return decorator
1702
+
1703
+
1704
+ def skip_on_not_implemented(only_if=None):
1705
+ """Decorator to Ignore NotImplementedError exceptions."""
1706
+
1707
+ def decorator(fun):
1708
+ @functools.wraps(fun)
1709
+ def wrapper(*args, **kwargs):
1710
+ try:
1711
+ return fun(*args, **kwargs)
1712
+ except NotImplementedError:
1713
+ if only_if is not None:
1714
+ if not only_if:
1715
+ raise
1716
+ msg = (
1717
+ "%r was skipped because it raised NotImplementedError"
1718
+ % fun.__name__
1719
+ )
1720
+ raise pytest.skip(msg)
1721
+
1722
+ return wrapper
1723
+
1724
+ return decorator
1725
+
1726
+
1727
+ # ===================================================================
1728
+ # --- network
1729
+ # ===================================================================
1730
+
1731
+
1732
+ # XXX: no longer used
1733
+ def get_free_port(host='127.0.0.1'):
1734
+ """Return an unused TCP port. Subject to race conditions."""
1735
+ with contextlib.closing(socket.socket()) as sock:
1736
+ sock.bind((host, 0))
1737
+ return sock.getsockname()[1]
1738
+
1739
+
1740
+ def bind_socket(family=AF_INET, type=SOCK_STREAM, addr=None):
1741
+ """Binds a generic socket."""
1742
+ if addr is None and family in {AF_INET, AF_INET6}:
1743
+ addr = ("", 0)
1744
+ sock = socket.socket(family, type)
1745
+ try:
1746
+ if os.name not in {'nt', 'cygwin'}:
1747
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
1748
+ sock.bind(addr)
1749
+ if type == socket.SOCK_STREAM:
1750
+ sock.listen(5)
1751
+ return sock
1752
+ except Exception:
1753
+ sock.close()
1754
+ raise
1755
+
1756
+
1757
+ def bind_unix_socket(name, type=socket.SOCK_STREAM):
1758
+ """Bind a UNIX socket."""
1759
+ assert psutil.POSIX
1760
+ assert not os.path.exists(name), name
1761
+ sock = socket.socket(socket.AF_UNIX, type)
1762
+ try:
1763
+ sock.bind(name)
1764
+ if type == socket.SOCK_STREAM:
1765
+ sock.listen(5)
1766
+ except Exception:
1767
+ sock.close()
1768
+ raise
1769
+ return sock
1770
+
1771
+
1772
+ def tcp_socketpair(family, addr=("", 0)):
1773
+ """Build a pair of TCP sockets connected to each other.
1774
+ Return a (server, client) tuple.
1775
+ """
1776
+ with contextlib.closing(socket.socket(family, SOCK_STREAM)) as ll:
1777
+ ll.bind(addr)
1778
+ ll.listen(5)
1779
+ addr = ll.getsockname()
1780
+ c = socket.socket(family, SOCK_STREAM)
1781
+ try:
1782
+ c.connect(addr)
1783
+ caddr = c.getsockname()
1784
+ while True:
1785
+ a, addr = ll.accept()
1786
+ # check that we've got the correct client
1787
+ if addr == caddr:
1788
+ return (a, c)
1789
+ a.close()
1790
+ except OSError:
1791
+ c.close()
1792
+ raise
1793
+
1794
+
1795
+ def unix_socketpair(name):
1796
+ """Build a pair of UNIX sockets connected to each other through
1797
+ the same UNIX file name.
1798
+ Return a (server, client) tuple.
1799
+ """
1800
+ assert psutil.POSIX
1801
+ server = client = None
1802
+ try:
1803
+ server = bind_unix_socket(name, type=socket.SOCK_STREAM)
1804
+ server.setblocking(0)
1805
+ client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
1806
+ client.setblocking(0)
1807
+ client.connect(name)
1808
+ # new = server.accept()
1809
+ except Exception:
1810
+ if server is not None:
1811
+ server.close()
1812
+ if client is not None:
1813
+ client.close()
1814
+ raise
1815
+ return (server, client)
1816
+
1817
+
1818
+ @contextlib.contextmanager
1819
+ def create_sockets():
1820
+ """Open as many socket families / types as possible."""
1821
+ socks = []
1822
+ fname1 = fname2 = None
1823
+ try:
1824
+ socks.append(bind_socket(socket.AF_INET, socket.SOCK_STREAM))
1825
+ socks.append(bind_socket(socket.AF_INET, socket.SOCK_DGRAM))
1826
+ if supports_ipv6():
1827
+ socks.append(bind_socket(socket.AF_INET6, socket.SOCK_STREAM))
1828
+ socks.append(bind_socket(socket.AF_INET6, socket.SOCK_DGRAM))
1829
+ if POSIX and HAS_NET_CONNECTIONS_UNIX:
1830
+ fname1 = get_testfn()
1831
+ fname2 = get_testfn()
1832
+ s1, s2 = unix_socketpair(fname1)
1833
+ s3 = bind_unix_socket(fname2, type=socket.SOCK_DGRAM)
1834
+ for s in (s1, s2, s3):
1835
+ socks.append(s)
1836
+ yield socks
1837
+ finally:
1838
+ for s in socks:
1839
+ s.close()
1840
+ for fname in (fname1, fname2):
1841
+ if fname is not None:
1842
+ safe_rmpath(fname)
1843
+
1844
+
1845
+ def check_net_address(addr, family):
1846
+ """Check a net address validity. Supported families are IPv4,
1847
+ IPv6 and MAC addresses.
1848
+ """
1849
+ import ipaddress # python >= 3.3 / requires "pip install ipaddress"
1850
+
1851
+ if enum and PY3 and not PYPY:
1852
+ assert isinstance(family, enum.IntEnum), family
1853
+ if family == socket.AF_INET:
1854
+ octs = [int(x) for x in addr.split('.')]
1855
+ assert len(octs) == 4, addr
1856
+ for num in octs:
1857
+ assert 0 <= num <= 255, addr
1858
+ if not PY3:
1859
+ addr = unicode(addr)
1860
+ ipaddress.IPv4Address(addr)
1861
+ elif family == socket.AF_INET6:
1862
+ assert isinstance(addr, str), addr
1863
+ if not PY3:
1864
+ addr = unicode(addr)
1865
+ ipaddress.IPv6Address(addr)
1866
+ elif family == psutil.AF_LINK:
1867
+ assert re.match(r'([a-fA-F0-9]{2}[:|\-]?){6}', addr) is not None, addr
1868
+ else:
1869
+ raise ValueError("unknown family %r" % family)
1870
+
1871
+
1872
+ def check_connection_ntuple(conn):
1873
+ """Check validity of a connection namedtuple."""
1874
+
1875
+ def check_ntuple(conn):
1876
+ has_pid = len(conn) == 7
1877
+ assert len(conn) in {6, 7}, len(conn)
1878
+ assert conn[0] == conn.fd, conn.fd
1879
+ assert conn[1] == conn.family, conn.family
1880
+ assert conn[2] == conn.type, conn.type
1881
+ assert conn[3] == conn.laddr, conn.laddr
1882
+ assert conn[4] == conn.raddr, conn.raddr
1883
+ assert conn[5] == conn.status, conn.status
1884
+ if has_pid:
1885
+ assert conn[6] == conn.pid, conn.pid
1886
+
1887
+ def check_family(conn):
1888
+ assert conn.family in {AF_INET, AF_INET6, AF_UNIX}, conn.family
1889
+ if enum is not None:
1890
+ assert isinstance(conn.family, enum.IntEnum), conn
1891
+ else:
1892
+ assert isinstance(conn.family, int), conn
1893
+ if conn.family == AF_INET:
1894
+ # actually try to bind the local socket; ignore IPv6
1895
+ # sockets as their address might be represented as
1896
+ # an IPv4-mapped-address (e.g. "::127.0.0.1")
1897
+ # and that's rejected by bind()
1898
+ s = socket.socket(conn.family, conn.type)
1899
+ with contextlib.closing(s):
1900
+ try:
1901
+ s.bind((conn.laddr[0], 0))
1902
+ except socket.error as err:
1903
+ if err.errno != errno.EADDRNOTAVAIL:
1904
+ raise
1905
+ elif conn.family == AF_UNIX:
1906
+ assert conn.status == psutil.CONN_NONE, conn.status
1907
+
1908
+ def check_type(conn):
1909
+ # SOCK_SEQPACKET may happen in case of AF_UNIX socks
1910
+ SOCK_SEQPACKET = getattr(socket, "SOCK_SEQPACKET", object())
1911
+ assert conn.type in {
1912
+ socket.SOCK_STREAM,
1913
+ socket.SOCK_DGRAM,
1914
+ SOCK_SEQPACKET,
1915
+ }, conn.type
1916
+ if enum is not None:
1917
+ assert isinstance(conn.type, enum.IntEnum), conn
1918
+ else:
1919
+ assert isinstance(conn.type, int), conn
1920
+ if conn.type == socket.SOCK_DGRAM:
1921
+ assert conn.status == psutil.CONN_NONE, conn.status
1922
+
1923
+ def check_addrs(conn):
1924
+ # check IP address and port sanity
1925
+ for addr in (conn.laddr, conn.raddr):
1926
+ if conn.family in {AF_INET, AF_INET6}:
1927
+ assert isinstance(addr, tuple), type(addr)
1928
+ if not addr:
1929
+ continue
1930
+ assert isinstance(addr.port, int), type(addr.port)
1931
+ assert 0 <= addr.port <= 65535, addr.port
1932
+ check_net_address(addr.ip, conn.family)
1933
+ elif conn.family == AF_UNIX:
1934
+ assert isinstance(addr, str), type(addr)
1935
+
1936
+ def check_status(conn):
1937
+ assert isinstance(conn.status, str), conn.status
1938
+ valids = [
1939
+ getattr(psutil, x) for x in dir(psutil) if x.startswith('CONN_')
1940
+ ]
1941
+ assert conn.status in valids, conn.status
1942
+ if conn.family in {AF_INET, AF_INET6} and conn.type == SOCK_STREAM:
1943
+ assert conn.status != psutil.CONN_NONE, conn.status
1944
+ else:
1945
+ assert conn.status == psutil.CONN_NONE, conn.status
1946
+
1947
+ check_ntuple(conn)
1948
+ check_family(conn)
1949
+ check_type(conn)
1950
+ check_addrs(conn)
1951
+ check_status(conn)
1952
+
1953
+
1954
+ def filter_proc_net_connections(cons):
1955
+ """Our process may start with some open UNIX sockets which are not
1956
+ initialized by us, invalidating unit tests.
1957
+ """
1958
+ new = []
1959
+ for conn in cons:
1960
+ if POSIX and conn.family == socket.AF_UNIX:
1961
+ if MACOS and "/syslog" in conn.raddr:
1962
+ debug("skipping %s" % str(conn))
1963
+ continue
1964
+ new.append(conn)
1965
+ return new
1966
+
1967
+
1968
+ # ===================================================================
1969
+ # --- compatibility
1970
+ # ===================================================================
1971
+
1972
+
1973
+ def reload_module(module):
1974
+ """Backport of importlib.reload of Python 3.3+."""
1975
+ try:
1976
+ import importlib
1977
+
1978
+ if not hasattr(importlib, 'reload'): # python <=3.3
1979
+ raise ImportError
1980
+ except ImportError:
1981
+ import imp
1982
+
1983
+ return imp.reload(module)
1984
+ else:
1985
+ return importlib.reload(module)
1986
+
1987
+
1988
+ def import_module_by_path(path):
1989
+ name = os.path.splitext(os.path.basename(path))[0]
1990
+ if sys.version_info[0] < 3:
1991
+ import imp
1992
+
1993
+ return imp.load_source(name, path)
1994
+ else:
1995
+ import importlib.util
1996
+
1997
+ spec = importlib.util.spec_from_file_location(name, path)
1998
+ mod = importlib.util.module_from_spec(spec)
1999
+ spec.loader.exec_module(mod)
2000
+ return mod
2001
+
2002
+
2003
+ # ===================================================================
2004
+ # --- others
2005
+ # ===================================================================
2006
+
2007
+
2008
+ def warn(msg):
2009
+ """Raise a warning msg."""
2010
+ warnings.warn(msg, UserWarning, stacklevel=2)
2011
+
2012
+
2013
+ def is_namedtuple(x):
2014
+ """Check if object is an instance of namedtuple."""
2015
+ t = type(x)
2016
+ b = t.__bases__
2017
+ if len(b) != 1 or b[0] is not tuple:
2018
+ return False
2019
+ f = getattr(t, '_fields', None)
2020
+ if not isinstance(f, tuple):
2021
+ return False
2022
+ return all(isinstance(n, str) for n in f)
2023
+
2024
+
2025
+ if POSIX:
2026
+
2027
+ @contextlib.contextmanager
2028
+ def copyload_shared_lib(suffix=""):
2029
+ """Ctx manager which picks up a random shared CO lib used
2030
+ by this process, copies it in another location and loads it
2031
+ in memory via ctypes. Return the new absolutized path.
2032
+ """
2033
+ exe = 'pypy' if PYPY else 'python'
2034
+ ext = ".so"
2035
+ dst = get_testfn(suffix=suffix + ext)
2036
+ libs = [
2037
+ x.path
2038
+ for x in psutil.Process().memory_maps()
2039
+ if os.path.splitext(x.path)[1] == ext and exe in x.path.lower()
2040
+ ]
2041
+ src = random.choice(libs)
2042
+ shutil.copyfile(src, dst)
2043
+ try:
2044
+ ctypes.CDLL(dst)
2045
+ yield dst
2046
+ finally:
2047
+ safe_rmpath(dst)
2048
+
2049
+ else:
2050
+
2051
+ @contextlib.contextmanager
2052
+ def copyload_shared_lib(suffix=""):
2053
+ """Ctx manager which picks up a random shared DLL lib used
2054
+ by this process, copies it in another location and loads it
2055
+ in memory via ctypes.
2056
+ Return the new absolutized, normcased path.
2057
+ """
2058
+ from ctypes import WinError
2059
+ from ctypes import wintypes
2060
+
2061
+ ext = ".dll"
2062
+ dst = get_testfn(suffix=suffix + ext)
2063
+ libs = [
2064
+ x.path
2065
+ for x in psutil.Process().memory_maps()
2066
+ if x.path.lower().endswith(ext)
2067
+ and 'python' in os.path.basename(x.path).lower()
2068
+ and 'wow64' not in x.path.lower()
2069
+ ]
2070
+ if PYPY and not libs:
2071
+ libs = [
2072
+ x.path
2073
+ for x in psutil.Process().memory_maps()
2074
+ if 'pypy' in os.path.basename(x.path).lower()
2075
+ ]
2076
+ src = random.choice(libs)
2077
+ shutil.copyfile(src, dst)
2078
+ cfile = None
2079
+ try:
2080
+ cfile = ctypes.WinDLL(dst)
2081
+ yield dst
2082
+ finally:
2083
+ # Work around OverflowError:
2084
+ # - https://ci.appveyor.com/project/giampaolo/psutil/build/1207/
2085
+ # job/o53330pbnri9bcw7
2086
+ # - http://bugs.python.org/issue30286
2087
+ # - http://stackoverflow.com/questions/23522055
2088
+ if cfile is not None:
2089
+ FreeLibrary = ctypes.windll.kernel32.FreeLibrary
2090
+ FreeLibrary.argtypes = [wintypes.HMODULE]
2091
+ ret = FreeLibrary(cfile._handle)
2092
+ if ret == 0:
2093
+ WinError()
2094
+ safe_rmpath(dst)
2095
+
2096
+
2097
+ # ===================================================================
2098
+ # --- Exit funs (first is executed last)
2099
+ # ===================================================================
2100
+
2101
+
2102
+ # this is executed first
2103
+ @atexit.register
2104
+ def cleanup_test_procs():
2105
+ reap_children(recursive=True)
2106
+
2107
+
2108
+ # atexit module does not execute exit functions in case of SIGTERM, which
2109
+ # gets sent to test subprocesses, which is a problem if they import this
2110
+ # module. With this it will. See:
2111
+ # https://gmpy.dev/blog/2016/how-to-always-execute-exit-functions-in-python
2112
+ if POSIX:
2113
+ signal.signal(signal.SIGTERM, lambda sig, _: sys.exit(sig))
.venv/lib/python3.11/site-packages/psutil/tests/__main__.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
2
+ # Use of this source code is governed by a BSD-style license that can be
3
+ # found in the LICENSE file.
4
+
5
+ """Run unit tests. This is invoked by:
6
+ $ python -m psutil.tests.
7
+ """
8
+
9
+ from psutil.tests import pytest
10
+
11
+
12
+ pytest.main(["-v", "-s", "--tb=short"])
.venv/lib/python3.11/site-packages/psutil/tests/__pycache__/__main__.cpython-311.pyc ADDED
Binary file (422 Bytes). View file
 
.venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_aix.cpython-311.pyc ADDED
Binary file (5.74 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_bsd.cpython-311.pyc ADDED
Binary file (40.3 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_connections.cpython-311.pyc ADDED
Binary file (32.5 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_contracts.cpython-311.pyc ADDED
Binary file (26.3 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_memleaks.cpython-311.pyc ADDED
Binary file (40.1 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_misc.cpython-311.pyc ADDED
Binary file (65.5 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_osx.cpython-311.pyc ADDED
Binary file (12.4 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_posix.cpython-311.pyc ADDED
Binary file (29.6 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_process_all.cpython-311.pyc ADDED
Binary file (31 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_sunos.cpython-311.pyc ADDED
Binary file (2.42 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_system.cpython-311.pyc ADDED
Binary file (62.6 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_testutils.cpython-311.pyc ADDED
Binary file (47.1 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_unicode.cpython-311.pyc ADDED
Binary file (20.6 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/tests/__pycache__/test_windows.cpython-311.pyc ADDED
Binary file (63 kB). View file
 
.venv/lib/python3.11/site-packages/psutil/tests/test_aix.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) 2009, Giampaolo Rodola'
4
+ # Copyright (c) 2017, Arnon Yaari
5
+ # All rights reserved.
6
+ # Use of this source code is governed by a BSD-style license that can be
7
+ # found in the LICENSE file.
8
+
9
+ """AIX specific tests."""
10
+
11
+ import re
12
+
13
+ import psutil
14
+ from psutil import AIX
15
+ from psutil.tests import PsutilTestCase
16
+ from psutil.tests import pytest
17
+ from psutil.tests import sh
18
+
19
+
20
+ @pytest.mark.skipif(not AIX, reason="AIX only")
21
+ class AIXSpecificTestCase(PsutilTestCase):
22
+ def test_virtual_memory(self):
23
+ out = sh('/usr/bin/svmon -O unit=KB')
24
+ re_pattern = r"memory\s*"
25
+ for field in [
26
+ "size",
27
+ "inuse",
28
+ "free",
29
+ "pin",
30
+ "virtual",
31
+ "available",
32
+ "mmode",
33
+ ]:
34
+ re_pattern += r"(?P<%s>\S+)\s+" % (field,)
35
+ matchobj = re.search(re_pattern, out)
36
+
37
+ assert matchobj is not None
38
+
39
+ KB = 1024
40
+ total = int(matchobj.group("size")) * KB
41
+ available = int(matchobj.group("available")) * KB
42
+ used = int(matchobj.group("inuse")) * KB
43
+ free = int(matchobj.group("free")) * KB
44
+
45
+ psutil_result = psutil.virtual_memory()
46
+
47
+ # TOLERANCE_SYS_MEM from psutil.tests is not enough. For some reason
48
+ # we're seeing differences of ~1.2 MB. 2 MB is still a good tolerance
49
+ # when compared to GBs.
50
+ TOLERANCE_SYS_MEM = 2 * KB * KB # 2 MB
51
+ assert psutil_result.total == total
52
+ assert abs(psutil_result.used - used) < TOLERANCE_SYS_MEM
53
+ assert abs(psutil_result.available - available) < TOLERANCE_SYS_MEM
54
+ assert abs(psutil_result.free - free) < TOLERANCE_SYS_MEM
55
+
56
+ def test_swap_memory(self):
57
+ out = sh('/usr/sbin/lsps -a')
58
+ # From the man page, "The size is given in megabytes" so we assume
59
+ # we'll always have 'MB' in the result
60
+ # TODO maybe try to use "swap -l" to check "used" too, but its units
61
+ # are not guaranteed to be "MB" so parsing may not be consistent
62
+ matchobj = re.search(
63
+ r"(?P<space>\S+)\s+"
64
+ r"(?P<vol>\S+)\s+"
65
+ r"(?P<vg>\S+)\s+"
66
+ r"(?P<size>\d+)MB",
67
+ out,
68
+ )
69
+
70
+ assert matchobj is not None
71
+
72
+ total_mb = int(matchobj.group("size"))
73
+ MB = 1024**2
74
+ psutil_result = psutil.swap_memory()
75
+ # we divide our result by MB instead of multiplying the lsps value by
76
+ # MB because lsps may round down, so we round down too
77
+ assert int(psutil_result.total / MB) == total_mb
78
+
79
+ def test_cpu_stats(self):
80
+ out = sh('/usr/bin/mpstat -a')
81
+
82
+ re_pattern = r"ALL\s*"
83
+ for field in [
84
+ "min",
85
+ "maj",
86
+ "mpcs",
87
+ "mpcr",
88
+ "dev",
89
+ "soft",
90
+ "dec",
91
+ "ph",
92
+ "cs",
93
+ "ics",
94
+ "bound",
95
+ "rq",
96
+ "push",
97
+ "S3pull",
98
+ "S3grd",
99
+ "S0rd",
100
+ "S1rd",
101
+ "S2rd",
102
+ "S3rd",
103
+ "S4rd",
104
+ "S5rd",
105
+ "sysc",
106
+ ]:
107
+ re_pattern += r"(?P<%s>\S+)\s+" % (field,)
108
+ matchobj = re.search(re_pattern, out)
109
+
110
+ assert matchobj is not None
111
+
112
+ # numbers are usually in the millions so 1000 is ok for tolerance
113
+ CPU_STATS_TOLERANCE = 1000
114
+ psutil_result = psutil.cpu_stats()
115
+ assert (
116
+ abs(psutil_result.ctx_switches - int(matchobj.group("cs")))
117
+ < CPU_STATS_TOLERANCE
118
+ )
119
+ assert (
120
+ abs(psutil_result.syscalls - int(matchobj.group("sysc")))
121
+ < CPU_STATS_TOLERANCE
122
+ )
123
+ assert (
124
+ abs(psutil_result.interrupts - int(matchobj.group("dev")))
125
+ < CPU_STATS_TOLERANCE
126
+ )
127
+ assert (
128
+ abs(psutil_result.soft_interrupts - int(matchobj.group("soft")))
129
+ < CPU_STATS_TOLERANCE
130
+ )
131
+
132
+ def test_cpu_count_logical(self):
133
+ out = sh('/usr/bin/mpstat -a')
134
+ mpstat_lcpu = int(re.search(r"lcpu=(\d+)", out).group(1))
135
+ psutil_lcpu = psutil.cpu_count(logical=True)
136
+ assert mpstat_lcpu == psutil_lcpu
137
+
138
+ def test_net_if_addrs_names(self):
139
+ out = sh('/etc/ifconfig -l')
140
+ ifconfig_names = set(out.split())
141
+ psutil_names = set(psutil.net_if_addrs().keys())
142
+ assert ifconfig_names == psutil_names