diff --git a/.gitattributes b/.gitattributes
index 0b17a3b4c096f2ad3a58d71b393f77c2be66fefa..2fd6207c0ff66af92ae18ce3a1383ee5306432d6 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -373,3 +373,10 @@ tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia/cudnn/lib/
 .venv/lib/python3.11/site-packages/numpy/core/_multiarray_tests.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 .venv/lib/python3.11/site-packages/numpy/core/_simd.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 .venv/lib/python3.11/site-packages/numpy/core/__pycache__/_add_newdocs.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/numpy/core/__pycache__/fromnumeric.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_regression.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_multiarray.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_dtype.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_datetime.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_umath.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
+.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_ufunc.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
diff --git a/.venv/lib/python3.11/site-packages/numpy/core/__pycache__/fromnumeric.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/__pycache__/fromnumeric.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c0461038b585de9ee9e5c62c3b9f8fb64134839
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/core/__pycache__/fromnumeric.cpython-311.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f708e7d1c6986af064a1f974c5fbc9ed8c2265f832b3816f9246f6928e59555
+size 138391
diff --git a/.venv/lib/python3.11/site-packages/numpy/core/lib/libnpymath.a b/.venv/lib/python3.11/site-packages/numpy/core/lib/libnpymath.a
new file mode 100644
index 0000000000000000000000000000000000000000..96a955e032495a2f05c094967ca015d489ea6457
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/lib/libnpymath.a differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini b/.venv/lib/python3.11/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini
new file mode 100644
index 0000000000000000000000000000000000000000..5840f5e1bc167f50ebc9fc98d60b60ee21ecbeec
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/core/lib/npy-pkg-config/mlib.ini
@@ -0,0 +1,12 @@
+[meta]
+Name = mlib
+Description = Math library used with this version of numpy
+Version = 1.0
+
+[default]
+Libs=-lm
+Cflags=
+
+[msvc]
+Libs=m.lib
+Cflags=
diff --git a/.venv/lib/python3.11/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini b/.venv/lib/python3.11/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini
new file mode 100644
index 0000000000000000000000000000000000000000..3e465ad2aceafd52f512d279e0de93e271e330b0
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/core/lib/npy-pkg-config/npymath.ini
@@ -0,0 +1,20 @@
+[meta]
+Name=npymath
+Description=Portable, core math library implementing C99 standard
+Version=0.1
+
+[variables]
+pkgname=numpy.core
+prefix=${pkgdir}
+libdir=${prefix}/lib
+includedir=${prefix}/include
+
+[default]
+Libs=-L${libdir} -lnpymath
+Cflags=-I${includedir}
+Requires=mlib
+
+[msvc]
+Libs=/LIBPATH:${libdir} npymath.lib
+Cflags=/INCLUDE:${includedir}
+Requires=mlib
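The two npy-pkg-config files above are what numpy.distutils reads when a build
requests the bundled npymath library. A minimal sketch of consuming them,
assuming the vendored NumPy is the one importable on sys.path:

    from numpy.distutils.misc_util import get_info

    # get_info() parses npymath.ini (pulling in mlib.ini via Requires=mlib)
    # and expands the ${pkgdir}-style variables into concrete build options.
    info = get_info('npymath')
    print(info['libraries'])     # typically ['npymath', 'm']
    print(info['include_dirs'])  # e.g. .../numpy/core/include
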
diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a5fa82e17020eca2069ce8c2b6a82712c84ef3b5
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/__init__.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_abc.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_abc.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..08226b7d2ea26d2b5be431cda8b1517ef15d7335
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_abc.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_api.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_api.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6d9049c887077fb3868b54fae29c9581f572e537
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_api.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_argparse.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_argparse.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..54fac8270762ea3eff9d9ac8a4d398d8c117dbcc
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_argparse.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_array_coercion.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_array_coercion.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9ae1989f4e10495e2f927ec71824e97e3317b880
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_array_coercion.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_array_interface.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_array_interface.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3464e0c48747dc79025defee1f931fc7072e7121
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_array_interface.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_arraymethod.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_arraymethod.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb92dfeaf7618bce217b0062b3e19b3039d8409d
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_arraymethod.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_casting_unittests.cpython-311.pyc
b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_casting_unittests.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3135c63dc76a9882a51ffcb0265808651695145f Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_casting_unittests.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_cpu_dispatcher.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_cpu_dispatcher.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e36f66f838728604afec0c817e019ea1d16a670 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_cpu_dispatcher.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_custom_dtypes.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_custom_dtypes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63624dd2cf0cdfc0d9f639cb78de2adb7c883952 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_custom_dtypes.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_cython.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_cython.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb325b09e4886492bb4010c0abdc9bbe90a42ed0 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_cython.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_datetime.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_datetime.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6838f6be0ba46fe9a64b192940296183abe6d83b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_datetime.cpython-311.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e0c3d7232260c033eb81fd115df9726bc46c26f398f0de1994ae0b8f7282bee +size 182268 diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_deprecations.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_deprecations.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dd1b9420a649ccf90ada778abb617e4772a131d Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_deprecations.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_dlpack.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_dlpack.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14def9add09ad9265acfb9ef9c55b033acbd34d6 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_dlpack.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_dtype.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_dtype.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..019044f327648a9fc481145964d7d1b951455e87 --- /dev/null +++ 
b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_dtype.cpython-311.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:249b91291f022427627ce54ba7acda9347681c6235a77f7eda209890a65e9236 +size 126538 diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_einsum.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_einsum.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12a94d01fcf15e0b440ff8d06222b81b5885277a Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_einsum.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_hashtable.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_hashtable.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e24daff41a038595f7c9c7f7772d616c50296f7 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_hashtable.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_indexing.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_indexing.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64a1a708a93a6dc829a5067e559a79668686a8e9 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_indexing.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_item_selection.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_item_selection.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f4c78f64f0d83b96fef32b01a42d6d8e7303615 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_item_selection.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_limited_api.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_limited_api.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14b183d29a53de34c8f1b29b3492fa0d32c89d8a Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_limited_api.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_mem_overlap.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_mem_overlap.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..742a9e05d7c6a4dda5cdc6d2985ffc9c2059f609 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_mem_overlap.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_mem_policy.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_mem_policy.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52421a57276bd55238e75ffa88c7de878e81ef76 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_mem_policy.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_multiarray.cpython-311.pyc 
b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_multiarray.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1925dac1cd2ff7c7b5f0b459f4f9d2be95b5f554 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_multiarray.cpython-311.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd62abae0db116ba948fa1d70d5ef91e0486534e0108444ba177033c09619ba5 +size 722413 diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_numerictypes.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_numerictypes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e611d9c8185cbaf9f854baf2ced40542cca1519e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_numerictypes.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_overrides.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_overrides.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ef8e6b44281e96730937fb263c0f5427ab1fe0a Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_overrides.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_regression.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_regression.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ecebdbb9b55108e70b43179ab184431e0ad90b6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_regression.cpython-311.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e7c3b082979ca8226acc59fe301ea52f4ee36a605eb1d216d55077b0b8cc74f +size 186475 diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_scalar_ctors.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_scalar_ctors.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fba267a105a3d083cd515a9886490f3d1c3450be Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_scalar_ctors.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_scalarinherit.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_scalarinherit.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51f4dbba09f8067b154f95abb5cbe01ba69ca70a Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_scalarinherit.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_scalarprint.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_scalarprint.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15b24c9aa8687b85baddda794d94c1014d53d425 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_scalarprint.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_ufunc.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_ufunc.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a3b39719c1fa514edf6979625bcb4700e43032fc
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_ufunc.cpython-311.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e30e90398a4cff151bd27deb21173050409f92bf3c2e96f3075065e074aee6a9
+size 226774
diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_umath.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_umath.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eb38fd846f1d1a4d3915d4da45d879fffd4f195f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_umath.cpython-311.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:799aa0eeebadd79f25fa7555a7f72cf566483c415817b2b61812c9b44e0515d9
+size 373101
diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_umath_complex.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_umath_complex.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eb69cd77d4a60e4a4f8bd92c8f2c25cf2bf402be
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/core/tests/__pycache__/test_umath_complex.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/data/astype_copy.pkl b/.venv/lib/python3.11/site-packages/numpy/core/tests/data/astype_copy.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..45694ae001c4a103365ff9fd5ae2da0dba3c11f6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/core/tests/data/astype_copy.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9564b309cbf3441ff0a6e4468fddaca46230fab34f15c77d87025a455bdf59d9
+size 716
diff --git a/.venv/lib/python3.11/site-packages/numpy/core/tests/data/numpy_2_0_array.pkl b/.venv/lib/python3.11/site-packages/numpy/core/tests/data/numpy_2_0_array.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..661ff83ca3d83da9c3812f0048f0b7d1ba62aa25
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/core/tests/data/numpy_2_0_array.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:561d36b5dc82ca96bc35be10cdd5619c30225c43b6590adcc1cbce7430c5179c
+size 718
diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/__init__.py b/.venv/lib/python3.11/site-packages/numpy/distutils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f74ed4d3f6dbed79dd9cd8284ebd596853204398
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/distutils/__init__.py
@@ -0,0 +1,64 @@
+"""
+An enhanced distutils, providing support for Fortran compilers, for BLAS,
+LAPACK and other common libraries for numerical computing, and more.
+
+Public submodules are::
+
+    misc_util
+    system_info
+    cpu_info
+    log
+    exec_command
+
+For details, please see the *Packaging* and *NumPy Distutils User Guide*
+sections of the NumPy Reference Guide.
+
+For configuring the preference for and location of libraries like BLAS and
+LAPACK, and for setting include paths and similar build options, please see
+``site.cfg.example`` in the root of the NumPy repository or sdist.
+
+"""
+
+import warnings
+
+# Must import local ccompiler ASAP in order to get
+# customized CCompiler.spawn effective.
+from . import ccompiler
+from . import unixccompiler
+
+from .npy_pkg_config import *
+
+warnings.warn("\n\n"
+    "  `numpy.distutils` is deprecated since NumPy 1.23.0, as a result\n"
+    "  of the deprecation of `distutils` itself. It will be removed for\n"
+    "  Python >= 3.12. For older Python versions it will remain present.\n"
+    "  It is recommended to use `setuptools < 60.0` for those Python versions.\n"
+    "  For more details, see:\n"
+    "    https://numpy.org/devdocs/reference/distutils_status_migration.html \n\n",
+    DeprecationWarning, stacklevel=2
+)
+del warnings
+
+# If numpy is installed, add distutils.test()
+try:
+    from . import __config__
+    # Normally numpy is installed if the above import works, but an interrupted
+    # in-place build could also have left a __config__.py. In that case the
+    # next import may still fail, so keep it inside the try block.
+    from numpy._pytesttester import PytestTester
+    test = PytestTester(__name__)
+    del PytestTester
+except ImportError:
+    pass
+
+
+def customized_fcompiler(plat=None, compiler=None):
+    from numpy.distutils.fcompiler import new_fcompiler
+    c = new_fcompiler(plat=plat, compiler=compiler)
+    c.customize()
+    return c
+
+def customized_ccompiler(plat=None, compiler=None, verbose=1):
+    c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose)
+    c.customize('')
+    return c
diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/__init__.pyi b/.venv/lib/python3.11/site-packages/numpy/distutils/__init__.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..3938d68de14c3f83f9278b5d6b6a151a28549a0d
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/distutils/__init__.pyi
@@ -0,0 +1,4 @@
+from typing import Any
+
+# TODO: remove when the full numpy namespace is defined
+def __getattr__(name: str) -> Any: ...
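customized_ccompiler() above is the convenience entry point for build scripts
that want a ready-to-use compiler object. A minimal usage sketch, silencing
the DeprecationWarning that the import itself now emits:

    import warnings

    with warnings.catch_warnings():
        # importing numpy.distutils triggers the warning added above
        warnings.simplefilter("ignore", DeprecationWarning)
        from numpy.distutils import customized_ccompiler

    cc = customized_ccompiler()  # platform default compiler, customize('') applied
    print(cc.compiler_type)      # e.g. 'unix' on Linux
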
diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/_shell_utils.py b/.venv/lib/python3.11/site-packages/numpy/distutils/_shell_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..82abd5f4e0fee8e3241a90d587026b1f97ec2bfe
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/distutils/_shell_utils.py
@@ -0,0 +1,91 @@
+"""
+Helper functions for interacting with the shell, and consuming shell-style
+parameters provided in config files.
+"""
+import os
+import shlex
+import subprocess
+try:
+    from shlex import quote
+except ImportError:
+    from pipes import quote
+
+__all__ = ['WindowsParser', 'PosixParser', 'NativeParser']
+
+
+class CommandLineParser:
+    """
+    An object that knows how to split and join command-line arguments.
+
+    It must be true that ``argv == split(join(argv))`` for all ``argv``.
+    The reverse needn't be true - `join(split(cmd))` may result in the addition
+    or removal of unnecessary escaping.
+    """
+    @staticmethod
+    def join(argv):
+        """ Join a list of arguments into a command line string """
+        raise NotImplementedError
+
+    @staticmethod
+    def split(cmd):
+        """ Split a command line string into a list of arguments """
+        raise NotImplementedError
+
+
+class WindowsParser:
+    """
+    The parsing behavior used by `subprocess.call("string")` on Windows, which
+    matches the Microsoft C/C++ runtime.
+
+    Note that this is _not_ the behavior of cmd.
+    """
+    @staticmethod
+    def join(argv):
+        # note that list2cmdline is specific to the windows syntax
+        return subprocess.list2cmdline(argv)
+
+    @staticmethod
+    def split(cmd):
+        import ctypes  # guarded import for systems without ctypes
+        try:
+            ctypes.windll
+        except AttributeError:
+            raise NotImplementedError
+
+        # Windows has special parsing rules for the executable (no quotes),
+        # that we do not care about - insert a dummy element
+        if not cmd:
+            return []
+        cmd = 'dummy ' + cmd
+
+        CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
+        CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p)
+        CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int))
+
+        nargs = ctypes.c_int()
+        lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs))
+        args = [lpargs[i] for i in range(nargs.value)]
+        assert not ctypes.windll.kernel32.LocalFree(lpargs)
+
+        # strip the element we inserted
+        assert args[0] == "dummy"
+        return args[1:]
+
+
+class PosixParser:
+    """
+    The parsing behavior used by `subprocess.call("string", shell=True)` on Posix.
+    """
+    @staticmethod
+    def join(argv):
+        return ' '.join(quote(arg) for arg in argv)
+
+    @staticmethod
+    def split(cmd):
+        return shlex.split(cmd, posix=True)
+
+
+if os.name == 'nt':
+    NativeParser = WindowsParser
+elif os.name == 'posix':
+    NativeParser = PosixParser
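The round-trip contract stated in the CommandLineParser docstring,
``argv == split(join(argv))``, can be checked directly against the platform
parser. A small sketch:

    from numpy.distutils._shell_utils import NativeParser

    argv = ['gcc', '-I/opt/include dir', '-DNAME=a b']
    cmd = NativeParser.join(argv)           # quotes args for this platform
    assert NativeParser.split(cmd) == argv  # split(join(argv)) round-trips
    print(cmd)  # e.g. gcc '-I/opt/include dir' '-DNAME=a b' on POSIX
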
diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/armccompiler.py b/.venv/lib/python3.11/site-packages/numpy/distutils/armccompiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..afba7eb3b3529835e59a52b42f7b143225faf465
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/distutils/armccompiler.py
@@ -0,0 +1,26 @@
+from distutils.unixccompiler import UnixCCompiler
+
+class ArmCCompiler(UnixCCompiler):
+
+    """
+    Arm compiler.
+    """
+
+    compiler_type = 'arm'
+    cc_exe = 'armclang'
+    cxx_exe = 'armclang++'
+
+    def __init__(self, verbose=0, dry_run=0, force=0):
+        UnixCCompiler.__init__(self, verbose, dry_run, force)
+        cc_compiler = self.cc_exe
+        cxx_compiler = self.cxx_exe
+        self.set_executables(compiler=cc_compiler +
+                                      ' -O3 -fPIC',
+                             compiler_so=cc_compiler +
+                                         ' -O3 -fPIC',
+                             compiler_cxx=cxx_compiler +
+                                          ' -O3 -fPIC',
+                             linker_exe=cc_compiler +
+                                        ' -lamath',
+                             linker_so=cc_compiler +
+                                       ' -lamath -shared')
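ArmCCompiler becomes selectable once ccompiler.py (later in this diff)
registers it as compiler_class['arm']. A hedged sketch of requesting it
explicitly; this assumes armclang is actually installed:

    from numpy.distutils.ccompiler import new_compiler

    cc = new_compiler(compiler='arm')  # resolved via compiler_class['arm']
    cc.customize('')
    print(cc.cc_exe, cc.cxx_exe)       # armclang armclang++
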
diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/ccompiler.py b/.venv/lib/python3.11/site-packages/numpy/distutils/ccompiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..40f495fc7aa82c10ad31082821729724782f86fa
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/distutils/ccompiler.py
@@ -0,0 +1,826 @@
+import os
+import re
+import sys
+import platform
+import shlex
+import time
+import subprocess
+from copy import copy
+from pathlib import Path
+from distutils import ccompiler
+from distutils.ccompiler import (
+    compiler_class, gen_lib_options, get_default_compiler, new_compiler,
+    CCompiler
+)
+from distutils.errors import (
+    DistutilsExecError, DistutilsModuleError, DistutilsPlatformError,
+    CompileError, UnknownFileError
+)
+from distutils.sysconfig import customize_compiler
+from distutils.version import LooseVersion
+
+from numpy.distutils import log
+from numpy.distutils.exec_command import (
+    filepath_from_subprocess_output, forward_bytes_to_stdout
+)
+from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
+                                      get_num_build_jobs, \
+                                      _commandline_dep_string, \
+                                      sanitize_cxx_flags
+
+# globals for parallel build management
+import threading
+
+_job_semaphore = None
+_global_lock = threading.Lock()
+_processing_files = set()
+
+
+def _needs_build(obj, cc_args, extra_postargs, pp_opts):
+    """
+    Check if an object needs to be rebuilt based on its dependencies
+
+    Parameters
+    ----------
+    obj : str
+        object file
+
+    Returns
+    -------
+    bool
+    """
+    # defined in unixcompiler.py
+    dep_file = obj + '.d'
+    if not os.path.exists(dep_file):
+        return True
+
+    # dep_file is a makefile containing 'object: dependencies'
+    # formatted like posix shell (spaces escaped, \ line continuations)
+    # the last line contains the compiler commandline arguments as some
+    # projects may compile an extension multiple times with different
+    # arguments
+    with open(dep_file) as f:
+        lines = f.readlines()
+
+    cmdline = _commandline_dep_string(cc_args, extra_postargs, pp_opts)
+    last_cmdline = lines[-1]
+    if last_cmdline != cmdline:
+        return True
+
+    contents = ''.join(lines[:-1])
+    deps = [x for x in shlex.split(contents, posix=True)
+            if x != "\n" and not x.endswith(":")]
+
+    try:
+        t_obj = os.stat(obj).st_mtime
+
+        # check if any of the dependencies is newer than the object
+        # the dependencies include the source used to create the object
+        for f in deps:
+            if os.stat(f).st_mtime > t_obj:
+                return True
+    except OSError:
+        # no object counts as newer (shouldn't happen if dep_file exists)
+        return True
+
+    return False
+
+
+def replace_method(klass, method_name, func):
+    # Py3k does not have unbound method anymore, MethodType does not work
+    m = lambda self, *args, **kw: func(self, *args, **kw)
+    setattr(klass, method_name, m)
+
+
+######################################################################
+## Method that subclasses may redefine. But don't call this method,
+## it is private to the CCompiler class and may return unexpected
+## results if used elsewhere.
So, you have been warned.. + +def CCompiler_find_executables(self): + """ + Does nothing here, but is called by the get_version method and can be + overridden by subclasses. In particular it is redefined in the `FCompiler` + class where more documentation can be found. + + """ + pass + + +replace_method(CCompiler, 'find_executables', CCompiler_find_executables) + + +# Using customized CCompiler.spawn. +def CCompiler_spawn(self, cmd, display=None, env=None): + """ + Execute a command in a sub-process. + + Parameters + ---------- + cmd : str + The command to execute. + display : str or sequence of str, optional + The text to add to the log file kept by `numpy.distutils`. + If not given, `display` is equal to `cmd`. + env : a dictionary for environment variables, optional + + Returns + ------- + None + + Raises + ------ + DistutilsExecError + If the command failed, i.e. the exit status was not 0. + + """ + env = env if env is not None else dict(os.environ) + if display is None: + display = cmd + if is_sequence(display): + display = ' '.join(list(display)) + log.info(display) + try: + if self.verbose: + subprocess.check_output(cmd, env=env) + else: + subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env) + except subprocess.CalledProcessError as exc: + o = exc.output + s = exc.returncode + except OSError as e: + # OSError doesn't have the same hooks for the exception + # output, but exec_command() historically would use an + # empty string for EnvironmentError (base class for + # OSError) + # o = b'' + # still that would make the end-user lost in translation! + o = f"\n\n{e}\n\n\n" + try: + o = o.encode(sys.stdout.encoding) + except AttributeError: + o = o.encode('utf8') + # status previously used by exec_command() for parent + # of OSError + s = 127 + else: + # use a convenience return here so that any kind of + # caught exception will execute the default code after the + # try / except block, which handles various exceptions + return None + + if is_sequence(cmd): + cmd = ' '.join(list(cmd)) + + if self.verbose: + forward_bytes_to_stdout(o) + + if re.search(b'Too many open files', o): + msg = '\nTry rerunning setup command until build succeeds.' + else: + msg = '' + raise DistutilsExecError('Command "%s" failed with exit status %d%s' % + (cmd, s, msg)) + +replace_method(CCompiler, 'spawn', CCompiler_spawn) + +def CCompiler_object_filenames(self, source_filenames, strip_dir=0, output_dir=''): + """ + Return the name of the object files for the given source files. + + Parameters + ---------- + source_filenames : list of str + The list of paths to source files. Paths can be either relative or + absolute, this is handled transparently. + strip_dir : bool, optional + Whether to strip the directory from the returned paths. If True, + the file name prepended by `output_dir` is returned. Default is False. + output_dir : str, optional + If given, this path is prepended to the returned paths to the + object files. + + Returns + ------- + obj_names : list of str + The list of paths to the object files corresponding to the source + files in `source_filenames`. + + """ + if output_dir is None: + output_dir = '' + obj_names = [] + for src_name in source_filenames: + base, ext = os.path.splitext(os.path.normpath(src_name)) + base = os.path.splitdrive(base)[1] # Chop off the drive + base = base[os.path.isabs(base):] # If abs, chop off leading / + if base.startswith('..'): + # Resolve starting relative path components, middle ones + # (if any) have been handled by os.path.normpath above. 
+ i = base.rfind('..')+2 + d = base[:i] + d = os.path.basename(os.path.abspath(d)) + base = d + base[i:] + if ext not in self.src_extensions: + raise UnknownFileError("unknown file type '%s' (from '%s')" % (ext, src_name)) + if strip_dir: + base = os.path.basename(base) + obj_name = os.path.join(output_dir, base + self.obj_extension) + obj_names.append(obj_name) + return obj_names + +replace_method(CCompiler, 'object_filenames', CCompiler_object_filenames) + +def CCompiler_compile(self, sources, output_dir=None, macros=None, + include_dirs=None, debug=0, extra_preargs=None, + extra_postargs=None, depends=None): + """ + Compile one or more source files. + + Please refer to the Python distutils API reference for more details. + + Parameters + ---------- + sources : list of str + A list of filenames + output_dir : str, optional + Path to the output directory. + macros : list of tuples + A list of macro definitions. + include_dirs : list of str, optional + The directories to add to the default include file search path for + this compilation only. + debug : bool, optional + Whether or not to output debug symbols in or alongside the object + file(s). + extra_preargs, extra_postargs : ? + Extra pre- and post-arguments. + depends : list of str, optional + A list of file names that all targets depend on. + + Returns + ------- + objects : list of str + A list of object file names, one per source file `sources`. + + Raises + ------ + CompileError + If compilation fails. + + """ + global _job_semaphore + + jobs = get_num_build_jobs() + + # setup semaphore to not exceed number of compile jobs when parallelized at + # extension level (python >= 3.5) + with _global_lock: + if _job_semaphore is None: + _job_semaphore = threading.Semaphore(jobs) + + if not sources: + return [] + from numpy.distutils.fcompiler import (FCompiler, + FORTRAN_COMMON_FIXED_EXTENSIONS, + has_f90_header) + if isinstance(self, FCompiler): + display = [] + for fc in ['f77', 'f90', 'fix']: + fcomp = getattr(self, 'compiler_'+fc) + if fcomp is None: + continue + display.append("Fortran %s compiler: %s" % (fc, ' '.join(fcomp))) + display = '\n'.join(display) + else: + ccomp = self.compiler_so + display = "C compiler: %s\n" % (' '.join(ccomp),) + log.info(display) + macros, objects, extra_postargs, pp_opts, build = \ + self._setup_compile(output_dir, macros, include_dirs, sources, + depends, extra_postargs) + cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) + display = "compile options: '%s'" % (' '.join(cc_args)) + if extra_postargs: + display += "\nextra options: '%s'" % (' '.join(extra_postargs)) + log.info(display) + + def single_compile(args): + obj, (src, ext) = args + if not _needs_build(obj, cc_args, extra_postargs, pp_opts): + return + + # check if we are currently already processing the same object + # happens when using the same source in multiple extensions + while True: + # need explicit lock as there is no atomic check and add with GIL + with _global_lock: + # file not being worked on, start working + if obj not in _processing_files: + _processing_files.add(obj) + break + # wait for the processing to end + time.sleep(0.1) + + try: + # retrieve slot from our #job semaphore and build + with _job_semaphore: + self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) + finally: + # register being done processing + with _global_lock: + _processing_files.remove(obj) + + + if isinstance(self, FCompiler): + objects_to_build = list(build.keys()) + f77_objects, other_objects = [], [] + for obj in objects: + if obj in 
objects_to_build: + src, ext = build[obj] + if self.compiler_type=='absoft': + obj = cyg2win32(obj) + src = cyg2win32(src) + if Path(src).suffix.lower() in FORTRAN_COMMON_FIXED_EXTENSIONS \ + and not has_f90_header(src): + f77_objects.append((obj, (src, ext))) + else: + other_objects.append((obj, (src, ext))) + + # f77 objects can be built in parallel + build_items = f77_objects + # build f90 modules serial, module files are generated during + # compilation and may be used by files later in the list so the + # ordering is important + for o in other_objects: + single_compile(o) + else: + build_items = build.items() + + if len(build) > 1 and jobs > 1: + # build parallel + from concurrent.futures import ThreadPoolExecutor + with ThreadPoolExecutor(jobs) as pool: + res = pool.map(single_compile, build_items) + list(res) # access result to raise errors + else: + # build serial + for o in build_items: + single_compile(o) + + # Return *all* object filenames, not just the ones we just built. + return objects + +replace_method(CCompiler, 'compile', CCompiler_compile) + +def CCompiler_customize_cmd(self, cmd, ignore=()): + """ + Customize compiler using distutils command. + + Parameters + ---------- + cmd : class instance + An instance inheriting from `distutils.cmd.Command`. + ignore : sequence of str, optional + List of `CCompiler` commands (without ``'set_'``) that should not be + altered. Strings that are checked for are: + ``('include_dirs', 'define', 'undef', 'libraries', 'library_dirs', + 'rpath', 'link_objects')``. + + Returns + ------- + None + + """ + log.info('customize %s using %s' % (self.__class__.__name__, + cmd.__class__.__name__)) + + if ( + hasattr(self, 'compiler') and + 'clang' in self.compiler[0] and + not (platform.machine() == 'arm64' and sys.platform == 'darwin') + ): + # clang defaults to a non-strict floating error point model. + # However, '-ftrapping-math' is not currently supported (2023-04-08) + # for macosx_arm64. + # Since NumPy and most Python libs give warnings for these, override: + self.compiler.append('-ftrapping-math') + self.compiler_so.append('-ftrapping-math') + + def allow(attr): + return getattr(cmd, attr, None) is not None and attr not in ignore + + if allow('include_dirs'): + self.set_include_dirs(cmd.include_dirs) + if allow('define'): + for (name, value) in cmd.define: + self.define_macro(name, value) + if allow('undef'): + for macro in cmd.undef: + self.undefine_macro(macro) + if allow('libraries'): + self.set_libraries(self.libraries + cmd.libraries) + if allow('library_dirs'): + self.set_library_dirs(self.library_dirs + cmd.library_dirs) + if allow('rpath'): + self.set_runtime_library_dirs(cmd.rpath) + if allow('link_objects'): + self.set_link_objects(cmd.link_objects) + +replace_method(CCompiler, 'customize_cmd', CCompiler_customize_cmd) + +def _compiler_to_string(compiler): + props = [] + mx = 0 + keys = list(compiler.executables.keys()) + for key in ['version', 'libraries', 'library_dirs', + 'object_switch', 'compile_switch', + 'include_dirs', 'define', 'undef', 'rpath', 'link_objects']: + if key not in keys: + keys.append(key) + for key in keys: + if hasattr(compiler, key): + v = getattr(compiler, key) + mx = max(mx, len(key)) + props.append((key, repr(v))) + fmt = '%-' + repr(mx+1) + 's = %s' + lines = [fmt % prop for prop in props] + return '\n'.join(lines) + +def CCompiler_show_customization(self): + """ + Print the compiler customizations to stdout. 
+ + Parameters + ---------- + None + + Returns + ------- + None + + Notes + ----- + Printing is only done if the distutils log threshold is < 2. + + """ + try: + self.get_version() + except Exception: + pass + if log._global_log.threshold<2: + print('*'*80) + print(self.__class__) + print(_compiler_to_string(self)) + print('*'*80) + +replace_method(CCompiler, 'show_customization', CCompiler_show_customization) + +def CCompiler_customize(self, dist, need_cxx=0): + """ + Do any platform-specific customization of a compiler instance. + + This method calls `distutils.sysconfig.customize_compiler` for + platform-specific customization, as well as optionally remove a flag + to suppress spurious warnings in case C++ code is being compiled. + + Parameters + ---------- + dist : object + This parameter is not used for anything. + need_cxx : bool, optional + Whether or not C++ has to be compiled. If so (True), the + ``"-Wstrict-prototypes"`` option is removed to prevent spurious + warnings. Default is False. + + Returns + ------- + None + + Notes + ----- + All the default options used by distutils can be extracted with:: + + from distutils import sysconfig + sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', + 'CCSHARED', 'LDSHARED', 'SO') + + """ + # See FCompiler.customize for suggested usage. + log.info('customize %s' % (self.__class__.__name__)) + customize_compiler(self) + if need_cxx: + # In general, distutils uses -Wstrict-prototypes, but this option is + # not valid for C++ code, only for C. Remove it if it's there to + # avoid a spurious warning on every compilation. + try: + self.compiler_so.remove('-Wstrict-prototypes') + except (AttributeError, ValueError): + pass + + if hasattr(self, 'compiler') and 'cc' in self.compiler[0]: + if not self.compiler_cxx: + if self.compiler[0].startswith('gcc'): + a, b = 'gcc', 'g++' + else: + a, b = 'cc', 'c++' + self.compiler_cxx = [self.compiler[0].replace(a, b)]\ + + self.compiler[1:] + else: + if hasattr(self, 'compiler'): + log.warn("#### %s #######" % (self.compiler,)) + if not hasattr(self, 'compiler_cxx'): + log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__) + + + # check if compiler supports gcc style automatic dependencies + # run on every extension so skip for known good compilers + if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or + 'g++' in self.compiler[0] or + 'clang' in self.compiler[0]): + self._auto_depends = True + elif os.name == 'posix': + import tempfile + import shutil + tmpdir = tempfile.mkdtemp() + try: + fn = os.path.join(tmpdir, "file.c") + with open(fn, "w") as f: + f.write("int a;\n") + self.compile([fn], output_dir=tmpdir, + extra_preargs=['-MMD', '-MF', fn + '.d']) + self._auto_depends = True + except CompileError: + self._auto_depends = False + finally: + shutil.rmtree(tmpdir) + + return + +replace_method(CCompiler, 'customize', CCompiler_customize) + +def simple_version_match(pat=r'[-.\d]+', ignore='', start=''): + """ + Simple matching of version numbers, for use in CCompiler and FCompiler. + + Parameters + ---------- + pat : str, optional + A regular expression matching version numbers. + Default is ``r'[-.\\d]+'``. + ignore : str, optional + A regular expression matching patterns to skip. + Default is ``''``, in which case nothing is skipped. + start : str, optional + A regular expression matching the start of where to start looking + for version numbers. + Default is ``''``, in which case searching is started at the + beginning of the version string given to `matcher`. 
+ + Returns + ------- + matcher : callable + A function that is appropriate to use as the ``.version_match`` + attribute of a `CCompiler` class. `matcher` takes a single parameter, + a version string. + + """ + def matcher(self, version_string): + # version string may appear in the second line, so getting rid + # of new lines: + version_string = version_string.replace('\n', ' ') + pos = 0 + if start: + m = re.match(start, version_string) + if not m: + return None + pos = m.end() + while True: + m = re.search(pat, version_string[pos:]) + if not m: + return None + if ignore and re.match(ignore, m.group(0)): + pos = m.end() + continue + break + return m.group(0) + return matcher + +def CCompiler_get_version(self, force=False, ok_status=[0]): + """ + Return compiler version, or None if compiler is not available. + + Parameters + ---------- + force : bool, optional + If True, force a new determination of the version, even if the + compiler already has a version attribute. Default is False. + ok_status : list of int, optional + The list of status values returned by the version look-up process + for which a version string is returned. If the status value is not + in `ok_status`, None is returned. Default is ``[0]``. + + Returns + ------- + version : str or None + Version string, in the format of `distutils.version.LooseVersion`. + + """ + if not force and hasattr(self, 'version'): + return self.version + self.find_executables() + try: + version_cmd = self.version_cmd + except AttributeError: + return None + if not version_cmd or not version_cmd[0]: + return None + try: + matcher = self.version_match + except AttributeError: + try: + pat = self.version_pattern + except AttributeError: + return None + def matcher(version_string): + m = re.match(pat, version_string) + if not m: + return None + version = m.group('version') + return version + + try: + output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as exc: + output = exc.output + status = exc.returncode + except OSError: + # match the historical returns for a parent + # exception class caught by exec_command() + status = 127 + output = b'' + else: + # output isn't actually a filepath but we do this + # for now to match previous distutils behavior + output = filepath_from_subprocess_output(output) + status = 0 + + version = None + if status in ok_status: + version = matcher(output) + if version: + version = LooseVersion(version) + self.version = version + return version + +replace_method(CCompiler, 'get_version', CCompiler_get_version) + +def CCompiler_cxx_compiler(self): + """ + Return the C++ compiler. + + Parameters + ---------- + None + + Returns + ------- + cxx : class instance + The C++ compiler, as a `CCompiler` instance. + + """ + if self.compiler_type in ('msvc', 'intelw', 'intelemw'): + return self + + cxx = copy(self) + cxx.compiler_cxx = cxx.compiler_cxx + cxx.compiler_so = [cxx.compiler_cxx[0]] + \ + sanitize_cxx_flags(cxx.compiler_so[1:]) + if (sys.platform.startswith(('aix', 'os400')) and + 'ld_so_aix' in cxx.linker_so[0]): + # AIX needs the ld_so_aix script included with Python + cxx.linker_so = [cxx.linker_so[0], cxx.compiler_cxx[0]] \ + + cxx.linker_so[2:] + if sys.platform.startswith('os400'): + #This is required by i 7.4 and prievous for PRId64 in printf() call. + cxx.compiler_so.append('-D__STDC_FORMAT_MACROS') + #This a bug of gcc10.3, which failed to handle the TLS init. 
+ cxx.compiler_so.append('-fno-extern-tls-init') + cxx.linker_so.append('-fno-extern-tls-init') + else: + cxx.linker_so = [cxx.compiler_cxx[0]] + cxx.linker_so[1:] + return cxx + +replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler) + +compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler', + "Intel C Compiler for 32-bit applications") +compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler', + "Intel C Itanium Compiler for Itanium-based applications") +compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler', + "Intel C Compiler for 64-bit applications") +compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW', + "Intel C Compiler for 32-bit applications on Windows") +compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW', + "Intel C Compiler for 64-bit applications on Windows") +compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler', + "PathScale Compiler for SiCortex-based applications") +compiler_class['arm'] = ('armccompiler', 'ArmCCompiler', + "Arm C Compiler") +compiler_class['fujitsu'] = ('fujitsuccompiler', 'FujitsuCCompiler', + "Fujitsu C Compiler") + +ccompiler._default_compilers += (('linux.*', 'intel'), + ('linux.*', 'intele'), + ('linux.*', 'intelem'), + ('linux.*', 'pathcc'), + ('nt', 'intelw'), + ('nt', 'intelemw')) + +if sys.platform == 'win32': + compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler', + "Mingw32 port of GNU C Compiler for Win32"\ + "(for MSC built Python)") + if mingw32(): + # On windows platforms, we want to default to mingw32 (gcc) + # because msvc can't build blitz stuff. + log.info('Setting mingw32 as default compiler for nt.') + ccompiler._default_compilers = (('nt', 'mingw32'),) \ + + ccompiler._default_compilers + + +_distutils_new_compiler = new_compiler +def new_compiler (plat=None, + compiler=None, + verbose=None, + dry_run=0, + force=0): + # Try first C compilers from numpy.distutils. + if verbose is None: + verbose = log.get_threshold() <= log.INFO + if plat is None: + plat = os.name + try: + if compiler is None: + compiler = get_default_compiler(plat) + (module_name, class_name, long_description) = compiler_class[compiler] + except KeyError: + msg = "don't know how to compile C/C++ code on platform '%s'" % plat + if compiler is not None: + msg = msg + " with '%s' compiler" % compiler + raise DistutilsPlatformError(msg) + module_name = "numpy.distutils." 
+ module_name + try: + __import__ (module_name) + except ImportError as e: + msg = str(e) + log.info('%s in numpy.distutils; trying from distutils', + str(msg)) + module_name = module_name[6:] + try: + __import__(module_name) + except ImportError as e: + msg = str(e) + raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \ + module_name) + try: + module = sys.modules[module_name] + klass = vars(module)[class_name] + except KeyError: + raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " + + "in module '%s'") % (class_name, module_name)) + compiler = klass(None, dry_run, force) + compiler.verbose = verbose + log.debug('new_compiler returns %s' % (klass)) + return compiler + +ccompiler.new_compiler = new_compiler + +_distutils_gen_lib_options = gen_lib_options +def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries): + # the version of this function provided by CPython allows the following + # to return lists, which are unpacked automatically: + # - compiler.runtime_library_dir_option + # our version extends the behavior to: + # - compiler.library_dir_option + # - compiler.library_option + # - compiler.find_library_file + r = _distutils_gen_lib_options(compiler, library_dirs, + runtime_library_dirs, libraries) + lib_opts = [] + for i in r: + if is_sequence(i): + lib_opts.extend(list(i)) + else: + lib_opts.append(i) + return lib_opts +ccompiler.gen_lib_options = gen_lib_options + +# Also fix up the various compiler modules, which do +# from distutils.ccompiler import gen_lib_options +# Don't bother with mwerks, as we don't support Classic Mac. +for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: + _m = sys.modules.get('distutils.' + _cc + 'compiler') + if _m is not None: + setattr(_m, 'gen_lib_options', gen_lib_options) + diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/ccompiler_opt.py b/.venv/lib/python3.11/site-packages/numpy/distutils/ccompiler_opt.py new file mode 100644 index 0000000000000000000000000000000000000000..37a5368b0b82180d00c5835f0a30e5456020137c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/ccompiler_opt.py @@ -0,0 +1,2668 @@ +"""Provides the `CCompilerOpt` class, used for handling the CPU/hardware +optimization, starting from parsing the command arguments, to managing the +relation between the CPU baseline and dispatch-able features, +also generating the required C headers and ending with compiling +the sources with proper compiler's flags. + +`CCompilerOpt` doesn't provide runtime detection for the CPU features, +instead only focuses on the compiler side, but it creates abstract C headers +that can be used later for the final runtime dispatching process.""" + +import atexit +import inspect +import os +import pprint +import re +import subprocess +import textwrap + +class _Config: + """An abstract class holds all configurable attributes of `CCompilerOpt`, + these class attributes can be used to change the default behavior + of `CCompilerOpt` in order to fit other requirements. + + Attributes + ---------- + conf_nocache : bool + Set True to disable memory and file cache. + Default is False. + + conf_noopt : bool + Set True to forces the optimization to be disabled, + in this case `CCompilerOpt` tends to generate all + expected headers in order to 'not' break the build. + Default is False. + + conf_cache_factors : list + Add extra factors to the primary caching factors. 
The caching factors + are utilized to determine if there are changes had happened that + requires to discard the cache and re-updating it. The primary factors + are the arguments of `CCompilerOpt` and `CCompiler`'s properties(type, flags, etc). + Default is list of two items, containing the time of last modification + of `ccompiler_opt` and value of attribute "conf_noopt" + + conf_tmp_path : str, + The path of temporary directory. Default is auto-created + temporary directory via ``tempfile.mkdtemp()``. + + conf_check_path : str + The path of testing files. Each added CPU feature must have a + **C** source file contains at least one intrinsic or instruction that + related to this feature, so it can be tested against the compiler. + Default is ``./distutils/checks``. + + conf_target_groups : dict + Extra tokens that can be reached from dispatch-able sources through + the special mark ``@targets``. Default is an empty dictionary. + + **Notes**: + - case-insensitive for tokens and group names + - sign '#' must stick in the begin of group name and only within ``@targets`` + + **Example**: + .. code-block:: console + + $ "@targets #avx_group other_tokens" > group_inside.c + + >>> CCompilerOpt.conf_target_groups["avx_group"] = \\ + "$werror $maxopt avx2 avx512f avx512_skx" + >>> cco = CCompilerOpt(cc_instance) + >>> cco.try_dispatch(["group_inside.c"]) + + conf_c_prefix : str + The prefix of public C definitions. Default is ``"NPY_"``. + + conf_c_prefix_ : str + The prefix of internal C definitions. Default is ``"NPY__"``. + + conf_cc_flags : dict + Nested dictionaries defining several compiler flags + that linked to some major functions, the main key + represent the compiler name and sub-keys represent + flags names. Default is already covers all supported + **C** compilers. + + Sub-keys explained as follows: + + "native": str or None + used by argument option `native`, to detect the current + machine support via the compiler. + "werror": str or None + utilized to treat warning as errors during testing CPU features + against the compiler and also for target's policy `$werror` + via dispatch-able sources. + "maxopt": str or None + utilized for target's policy '$maxopt' and the value should + contains the maximum acceptable optimization by the compiler. + e.g. in gcc `'-O3'` + + **Notes**: + * case-sensitive for compiler names and flags + * use space to separate multiple flags + * any flag will tested against the compiler and it will skipped + if it's not applicable. + + conf_min_features : dict + A dictionary defines the used CPU features for + argument option `'min'`, the key represent the CPU architecture + name e.g. `'x86'`. Default values provide the best effort + on wide range of users platforms. + + **Note**: case-sensitive for architecture names. + + conf_features : dict + Nested dictionaries used for identifying the CPU features. + the primary key is represented as a feature name or group name + that gathers several features. Default values covers all + supported features but without the major options like "flags", + these undefined options handle it by method `conf_features_partial()`. + Default value is covers almost all CPU features for *X86*, *IBM/Power64* + and *ARM 7/8*. + + Sub-keys explained as follows: + + "implies" : str or list, optional, + List of CPU feature names to be implied by it, + the feature name must be defined within `conf_features`. + Default is None. + + "flags": str or list, optional + List of compiler flags. Default is None. 
+ + "detect": str or list, optional + List of CPU feature names that required to be detected + in runtime. By default, its the feature name or features + in "group" if its specified. + + "implies_detect": bool, optional + If True, all "detect" of implied features will be combined. + Default is True. see `feature_detect()`. + + "group": str or list, optional + Same as "implies" but doesn't require the feature name to be + defined within `conf_features`. + + "interest": int, required + a key for sorting CPU features + + "headers": str or list, optional + intrinsics C header file + + "disable": str, optional + force disable feature, the string value should contains the + reason of disabling. + + "autovec": bool or None, optional + True or False to declare that CPU feature can be auto-vectorized + by the compiler. + By default(None), treated as True if the feature contains at + least one applicable flag. see `feature_can_autovec()` + + "extra_checks": str or list, optional + Extra test case names for the CPU feature that need to be tested + against the compiler. + + Each test case must have a C file named ``extra_xxxx.c``, where + ``xxxx`` is the case name in lower case, under 'conf_check_path'. + It should contain at least one intrinsic or function related to the test case. + + If the compiler able to successfully compile the C file then `CCompilerOpt` + will add a C ``#define`` for it into the main dispatch header, e.g. + ``#define {conf_c_prefix}_XXXX`` where ``XXXX`` is the case name in upper case. + + **NOTES**: + * space can be used as separator with options that supports "str or list" + * case-sensitive for all values and feature name must be in upper-case. + * if flags aren't applicable, its will skipped rather than disable the + CPU feature + * the CPU feature will disabled if the compiler fail to compile + the test file + """ + conf_nocache = False + conf_noopt = False + conf_cache_factors = None + conf_tmp_path = None + conf_check_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "checks" + ) + conf_target_groups = {} + conf_c_prefix = 'NPY_' + conf_c_prefix_ = 'NPY__' + conf_cc_flags = dict( + gcc = dict( + # native should always fail on arm and ppc64, + # native usually works only with x86 + native = '-march=native', + opt = '-O3', + werror = '-Werror', + ), + clang = dict( + native = '-march=native', + opt = "-O3", + # One of the following flags needs to be applicable for Clang to + # guarantee the sanity of the testing process, however in certain + # cases `-Werror` gets skipped during the availability test due to + # "unused arguments" warnings. 
+ # see https://github.com/numpy/numpy/issues/19624 + werror = '-Werror=switch -Werror', + ), + icc = dict( + native = '-xHost', + opt = '-O3', + werror = '-Werror', + ), + iccw = dict( + native = '/QxHost', + opt = '/O3', + werror = '/Werror', + ), + msvc = dict( + native = None, + opt = '/O2', + werror = '/WX', + ), + fcc = dict( + native = '-mcpu=a64fx', + opt = None, + werror = None, + ) + ) + conf_min_features = dict( + x86 = "SSE SSE2", + x64 = "SSE SSE2 SSE3", + ppc64 = '', # play it safe + ppc64le = "VSX VSX2", + s390x = '', + armhf = '', # play it safe + aarch64 = "NEON NEON_FP16 NEON_VFPV4 ASIMD" + ) + conf_features = dict( + # X86 + SSE = dict( + interest=1, headers="xmmintrin.h", + # enabling SSE without SSE2 is useless also + # it's non-optional for x86_64 + implies="SSE2" + ), + SSE2 = dict(interest=2, implies="SSE", headers="emmintrin.h"), + SSE3 = dict(interest=3, implies="SSE2", headers="pmmintrin.h"), + SSSE3 = dict(interest=4, implies="SSE3", headers="tmmintrin.h"), + SSE41 = dict(interest=5, implies="SSSE3", headers="smmintrin.h"), + POPCNT = dict(interest=6, implies="SSE41", headers="popcntintrin.h"), + SSE42 = dict(interest=7, implies="POPCNT"), + AVX = dict( + interest=8, implies="SSE42", headers="immintrin.h", + implies_detect=False + ), + XOP = dict(interest=9, implies="AVX", headers="x86intrin.h"), + FMA4 = dict(interest=10, implies="AVX", headers="x86intrin.h"), + F16C = dict(interest=11, implies="AVX"), + FMA3 = dict(interest=12, implies="F16C"), + AVX2 = dict(interest=13, implies="F16C"), + AVX512F = dict( + interest=20, implies="FMA3 AVX2", implies_detect=False, + extra_checks="AVX512F_REDUCE" + ), + AVX512CD = dict(interest=21, implies="AVX512F"), + AVX512_KNL = dict( + interest=40, implies="AVX512CD", group="AVX512ER AVX512PF", + detect="AVX512_KNL", implies_detect=False + ), + AVX512_KNM = dict( + interest=41, implies="AVX512_KNL", + group="AVX5124FMAPS AVX5124VNNIW AVX512VPOPCNTDQ", + detect="AVX512_KNM", implies_detect=False + ), + AVX512_SKX = dict( + interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ", + detect="AVX512_SKX", implies_detect=False, + extra_checks="AVX512BW_MASK AVX512DQ_MASK" + ), + AVX512_CLX = dict( + interest=43, implies="AVX512_SKX", group="AVX512VNNI", + detect="AVX512_CLX" + ), + AVX512_CNL = dict( + interest=44, implies="AVX512_SKX", group="AVX512IFMA AVX512VBMI", + detect="AVX512_CNL", implies_detect=False + ), + AVX512_ICL = dict( + interest=45, implies="AVX512_CLX AVX512_CNL", + group="AVX512VBMI2 AVX512BITALG AVX512VPOPCNTDQ", + detect="AVX512_ICL", implies_detect=False + ), + AVX512_SPR = dict( + interest=46, implies="AVX512_ICL", group="AVX512FP16", + detect="AVX512_SPR", implies_detect=False + ), + # IBM/Power + ## Power7/ISA 2.06 + VSX = dict(interest=1, headers="altivec.h", extra_checks="VSX_ASM"), + ## Power8/ISA 2.07 + VSX2 = dict(interest=2, implies="VSX", implies_detect=False), + ## Power9/ISA 3.00 + VSX3 = dict(interest=3, implies="VSX2", implies_detect=False, + extra_checks="VSX3_HALF_DOUBLE"), + ## Power10/ISA 3.1 + VSX4 = dict(interest=4, implies="VSX3", implies_detect=False, + extra_checks="VSX4_MMA"), + # IBM/Z + ## VX(z13) support + VX = dict(interest=1, headers="vecintrin.h"), + ## Vector-Enhancements Facility + VXE = dict(interest=2, implies="VX", implies_detect=False), + ## Vector-Enhancements Facility 2 + VXE2 = dict(interest=3, implies="VXE", implies_detect=False), + # ARM + NEON = dict(interest=1, headers="arm_neon.h"), + NEON_FP16 = dict(interest=2, implies="NEON"), + ## FMA + NEON_VFPV4 = 
dict(interest=3, implies="NEON_FP16"), + ## Advanced SIMD + ASIMD = dict(interest=4, implies="NEON_FP16 NEON_VFPV4", implies_detect=False), + ## ARMv8.2 half-precision & vector arithm + ASIMDHP = dict(interest=5, implies="ASIMD"), + ## ARMv8.2 dot product + ASIMDDP = dict(interest=6, implies="ASIMD"), + ## ARMv8.2 Single & half-precision Multiply + ASIMDFHM = dict(interest=7, implies="ASIMDHP"), + ) + def conf_features_partial(self): + """Return a dictionary of supported CPU features by the platform, + and accumulate the rest of undefined options in `conf_features`, + the returned dict has same rules and notes in + class attribute `conf_features`, also its override + any options that been set in 'conf_features'. + """ + if self.cc_noopt: + # optimization is disabled + return {} + + on_x86 = self.cc_on_x86 or self.cc_on_x64 + is_unix = self.cc_is_gcc or self.cc_is_clang or self.cc_is_fcc + + if on_x86 and is_unix: return dict( + SSE = dict(flags="-msse"), + SSE2 = dict(flags="-msse2"), + SSE3 = dict(flags="-msse3"), + SSSE3 = dict(flags="-mssse3"), + SSE41 = dict(flags="-msse4.1"), + POPCNT = dict(flags="-mpopcnt"), + SSE42 = dict(flags="-msse4.2"), + AVX = dict(flags="-mavx"), + F16C = dict(flags="-mf16c"), + XOP = dict(flags="-mxop"), + FMA4 = dict(flags="-mfma4"), + FMA3 = dict(flags="-mfma"), + AVX2 = dict(flags="-mavx2"), + AVX512F = dict(flags="-mavx512f -mno-mmx"), + AVX512CD = dict(flags="-mavx512cd"), + AVX512_KNL = dict(flags="-mavx512er -mavx512pf"), + AVX512_KNM = dict( + flags="-mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq" + ), + AVX512_SKX = dict(flags="-mavx512vl -mavx512bw -mavx512dq"), + AVX512_CLX = dict(flags="-mavx512vnni"), + AVX512_CNL = dict(flags="-mavx512ifma -mavx512vbmi"), + AVX512_ICL = dict( + flags="-mavx512vbmi2 -mavx512bitalg -mavx512vpopcntdq" + ), + AVX512_SPR = dict(flags="-mavx512fp16"), + ) + if on_x86 and self.cc_is_icc: return dict( + SSE = dict(flags="-msse"), + SSE2 = dict(flags="-msse2"), + SSE3 = dict(flags="-msse3"), + SSSE3 = dict(flags="-mssse3"), + SSE41 = dict(flags="-msse4.1"), + POPCNT = {}, + SSE42 = dict(flags="-msse4.2"), + AVX = dict(flags="-mavx"), + F16C = {}, + XOP = dict(disable="Intel Compiler doesn't support it"), + FMA4 = dict(disable="Intel Compiler doesn't support it"), + # Intel Compiler doesn't support AVX2 or FMA3 independently + FMA3 = dict( + implies="F16C AVX2", flags="-march=core-avx2" + ), + AVX2 = dict(implies="FMA3", flags="-march=core-avx2"), + # Intel Compiler doesn't support AVX512F or AVX512CD independently + AVX512F = dict( + implies="AVX2 AVX512CD", flags="-march=common-avx512" + ), + AVX512CD = dict( + implies="AVX2 AVX512F", flags="-march=common-avx512" + ), + AVX512_KNL = dict(flags="-xKNL"), + AVX512_KNM = dict(flags="-xKNM"), + AVX512_SKX = dict(flags="-xSKYLAKE-AVX512"), + AVX512_CLX = dict(flags="-xCASCADELAKE"), + AVX512_CNL = dict(flags="-xCANNONLAKE"), + AVX512_ICL = dict(flags="-xICELAKE-CLIENT"), + AVX512_SPR = dict(disable="Not supported yet") + ) + if on_x86 and self.cc_is_iccw: return dict( + SSE = dict(flags="/arch:SSE"), + SSE2 = dict(flags="/arch:SSE2"), + SSE3 = dict(flags="/arch:SSE3"), + SSSE3 = dict(flags="/arch:SSSE3"), + SSE41 = dict(flags="/arch:SSE4.1"), + POPCNT = {}, + SSE42 = dict(flags="/arch:SSE4.2"), + AVX = dict(flags="/arch:AVX"), + F16C = {}, + XOP = dict(disable="Intel Compiler doesn't support it"), + FMA4 = dict(disable="Intel Compiler doesn't support it"), + # Intel Compiler doesn't support FMA3 or AVX2 independently + FMA3 = dict( + implies="F16C AVX2", 
flags="/arch:CORE-AVX2" + ), + AVX2 = dict( + implies="FMA3", flags="/arch:CORE-AVX2" + ), + # Intel Compiler doesn't support AVX512F or AVX512CD independently + AVX512F = dict( + implies="AVX2 AVX512CD", flags="/Qx:COMMON-AVX512" + ), + AVX512CD = dict( + implies="AVX2 AVX512F", flags="/Qx:COMMON-AVX512" + ), + AVX512_KNL = dict(flags="/Qx:KNL"), + AVX512_KNM = dict(flags="/Qx:KNM"), + AVX512_SKX = dict(flags="/Qx:SKYLAKE-AVX512"), + AVX512_CLX = dict(flags="/Qx:CASCADELAKE"), + AVX512_CNL = dict(flags="/Qx:CANNONLAKE"), + AVX512_ICL = dict(flags="/Qx:ICELAKE-CLIENT"), + AVX512_SPR = dict(disable="Not supported yet") + ) + if on_x86 and self.cc_is_msvc: return dict( + SSE = dict(flags="/arch:SSE") if self.cc_on_x86 else {}, + SSE2 = dict(flags="/arch:SSE2") if self.cc_on_x86 else {}, + SSE3 = {}, + SSSE3 = {}, + SSE41 = {}, + POPCNT = dict(headers="nmmintrin.h"), + SSE42 = {}, + AVX = dict(flags="/arch:AVX"), + F16C = {}, + XOP = dict(headers="ammintrin.h"), + FMA4 = dict(headers="ammintrin.h"), + # MSVC doesn't support FMA3 or AVX2 independently + FMA3 = dict( + implies="F16C AVX2", flags="/arch:AVX2" + ), + AVX2 = dict( + implies="F16C FMA3", flags="/arch:AVX2" + ), + # MSVC doesn't support AVX512F or AVX512CD independently, + # always generate instructions belong to (VL/VW/DQ) + AVX512F = dict( + implies="AVX2 AVX512CD AVX512_SKX", flags="/arch:AVX512" + ), + AVX512CD = dict( + implies="AVX512F AVX512_SKX", flags="/arch:AVX512" + ), + AVX512_KNL = dict( + disable="MSVC compiler doesn't support it" + ), + AVX512_KNM = dict( + disable="MSVC compiler doesn't support it" + ), + AVX512_SKX = dict(flags="/arch:AVX512"), + AVX512_CLX = {}, + AVX512_CNL = {}, + AVX512_ICL = {}, + AVX512_SPR= dict( + disable="MSVC compiler doesn't support it" + ) + ) + + on_power = self.cc_on_ppc64le or self.cc_on_ppc64 + if on_power: + partial = dict( + VSX = dict( + implies=("VSX2" if self.cc_on_ppc64le else ""), + flags="-mvsx" + ), + VSX2 = dict( + flags="-mcpu=power8", implies_detect=False + ), + VSX3 = dict( + flags="-mcpu=power9 -mtune=power9", implies_detect=False + ), + VSX4 = dict( + flags="-mcpu=power10 -mtune=power10", implies_detect=False + ) + ) + if self.cc_is_clang: + partial["VSX"]["flags"] = "-maltivec -mvsx" + partial["VSX2"]["flags"] = "-mcpu=power8" + partial["VSX3"]["flags"] = "-mcpu=power9" + partial["VSX4"]["flags"] = "-mcpu=power10" + + return partial + + on_zarch = self.cc_on_s390x + if on_zarch: + partial = dict( + VX = dict( + flags="-march=arch11 -mzvector" + ), + VXE = dict( + flags="-march=arch12", implies_detect=False + ), + VXE2 = dict( + flags="-march=arch13", implies_detect=False + ) + ) + + return partial + + + if self.cc_on_aarch64 and is_unix: return dict( + NEON = dict( + implies="NEON_FP16 NEON_VFPV4 ASIMD", autovec=True + ), + NEON_FP16 = dict( + implies="NEON NEON_VFPV4 ASIMD", autovec=True + ), + NEON_VFPV4 = dict( + implies="NEON NEON_FP16 ASIMD", autovec=True + ), + ASIMD = dict( + implies="NEON NEON_FP16 NEON_VFPV4", autovec=True + ), + ASIMDHP = dict( + flags="-march=armv8.2-a+fp16" + ), + ASIMDDP = dict( + flags="-march=armv8.2-a+dotprod" + ), + ASIMDFHM = dict( + flags="-march=armv8.2-a+fp16fml" + ), + ) + if self.cc_on_armhf and is_unix: return dict( + NEON = dict( + flags="-mfpu=neon" + ), + NEON_FP16 = dict( + flags="-mfpu=neon-fp16 -mfp16-format=ieee" + ), + NEON_VFPV4 = dict( + flags="-mfpu=neon-vfpv4", + ), + ASIMD = dict( + flags="-mfpu=neon-fp-armv8 -march=armv8-a+simd", + ), + ASIMDHP = dict( + flags="-march=armv8.2-a+fp16" + ), + ASIMDDP = dict( + 
flags="-march=armv8.2-a+dotprod", + ), + ASIMDFHM = dict( + flags="-march=armv8.2-a+fp16fml" + ) + ) + # TODO: ARM MSVC + return {} + + def __init__(self): + if self.conf_tmp_path is None: + import shutil + import tempfile + tmp = tempfile.mkdtemp() + def rm_temp(): + try: + shutil.rmtree(tmp) + except OSError: + pass + atexit.register(rm_temp) + self.conf_tmp_path = tmp + + if self.conf_cache_factors is None: + self.conf_cache_factors = [ + os.path.getmtime(__file__), + self.conf_nocache + ] + +class _Distutils: + """A helper class that provides a collection of fundamental methods + implemented in a top of Python and NumPy Distutils. + + The idea behind this class is to gather all methods that it may + need to override in case of reuse 'CCompilerOpt' in environment + different than of what NumPy has. + + Parameters + ---------- + ccompiler : `CCompiler` + The generate instance that returned from `distutils.ccompiler.new_compiler()`. + """ + def __init__(self, ccompiler): + self._ccompiler = ccompiler + + def dist_compile(self, sources, flags, ccompiler=None, **kwargs): + """Wrap CCompiler.compile()""" + assert(isinstance(sources, list)) + assert(isinstance(flags, list)) + flags = kwargs.pop("extra_postargs", []) + flags + if not ccompiler: + ccompiler = self._ccompiler + + return ccompiler.compile(sources, extra_postargs=flags, **kwargs) + + def dist_test(self, source, flags, macros=[]): + """Return True if 'CCompiler.compile()' able to compile + a source file with certain flags. + """ + assert(isinstance(source, str)) + from distutils.errors import CompileError + cc = self._ccompiler; + bk_spawn = getattr(cc, 'spawn', None) + if bk_spawn: + cc_type = getattr(self._ccompiler, "compiler_type", "") + if cc_type in ("msvc",): + setattr(cc, 'spawn', self._dist_test_spawn_paths) + else: + setattr(cc, 'spawn', self._dist_test_spawn) + test = False + try: + self.dist_compile( + [source], flags, macros=macros, output_dir=self.conf_tmp_path + ) + test = True + except CompileError as e: + self.dist_log(str(e), stderr=True) + if bk_spawn: + setattr(cc, 'spawn', bk_spawn) + return test + + def dist_info(self): + """ + Return a tuple containing info about (platform, compiler, extra_args), + required by the abstract class '_CCompiler' for discovering the + platform environment. This is also used as a cache factor in order + to detect any changes happening from outside. 
+ """ + if hasattr(self, "_dist_info"): + return self._dist_info + + cc_type = getattr(self._ccompiler, "compiler_type", '') + if cc_type in ("intelem", "intelemw"): + platform = "x86_64" + elif cc_type in ("intel", "intelw", "intele"): + platform = "x86" + else: + from distutils.util import get_platform + platform = get_platform() + + cc_info = getattr(self._ccompiler, "compiler", getattr(self._ccompiler, "compiler_so", '')) + if not cc_type or cc_type == "unix": + if hasattr(cc_info, "__iter__"): + compiler = cc_info[0] + else: + compiler = str(cc_info) + else: + compiler = cc_type + + if hasattr(cc_info, "__iter__") and len(cc_info) > 1: + extra_args = ' '.join(cc_info[1:]) + else: + extra_args = os.environ.get("CFLAGS", "") + extra_args += os.environ.get("CPPFLAGS", "") + + self._dist_info = (platform, compiler, extra_args) + return self._dist_info + + @staticmethod + def dist_error(*args): + """Raise a compiler error""" + from distutils.errors import CompileError + raise CompileError(_Distutils._dist_str(*args)) + + @staticmethod + def dist_fatal(*args): + """Raise a distutils error""" + from distutils.errors import DistutilsError + raise DistutilsError(_Distutils._dist_str(*args)) + + @staticmethod + def dist_log(*args, stderr=False): + """Print a console message""" + from numpy.distutils import log + out = _Distutils._dist_str(*args) + if stderr: + log.warn(out) + else: + log.info(out) + + @staticmethod + def dist_load_module(name, path): + """Load a module from file, required by the abstract class '_Cache'.""" + from .misc_util import exec_mod_from_location + try: + return exec_mod_from_location(name, path) + except Exception as e: + _Distutils.dist_log(e, stderr=True) + return None + + @staticmethod + def _dist_str(*args): + """Return a string to print by log and errors.""" + def to_str(arg): + if not isinstance(arg, str) and hasattr(arg, '__iter__'): + ret = [] + for a in arg: + ret.append(to_str(a)) + return '('+ ' '.join(ret) + ')' + return str(arg) + + stack = inspect.stack()[2] + start = "CCompilerOpt.%s[%d] : " % (stack.function, stack.lineno) + out = ' '.join([ + to_str(a) + for a in (*args,) + ]) + return start + out + + def _dist_test_spawn_paths(self, cmd, display=None): + """ + Fix msvc SDK ENV path same as distutils do + without it we get c1: fatal error C1356: unable to find mspdbcore.dll + """ + if not hasattr(self._ccompiler, "_paths"): + self._dist_test_spawn(cmd) + return + old_path = os.getenv("path") + try: + os.environ["path"] = self._ccompiler._paths + self._dist_test_spawn(cmd) + finally: + os.environ["path"] = old_path + + _dist_warn_regex = re.compile( + # intel and msvc compilers don't raise + # fatal errors when flags are wrong or unsupported + ".*(" + "warning D9002|" # msvc, it should be work with any language. 
+ "invalid argument for option" # intel + ").*" + ) + @staticmethod + def _dist_test_spawn(cmd, display=None): + try: + o = subprocess.check_output(cmd, stderr=subprocess.STDOUT, + text=True) + if o and re.match(_Distutils._dist_warn_regex, o): + _Distutils.dist_error( + "Flags in command", cmd ,"aren't supported by the compiler" + ", output -> \n%s" % o + ) + except subprocess.CalledProcessError as exc: + o = exc.output + s = exc.returncode + except OSError as e: + o = e + s = 127 + else: + return None + _Distutils.dist_error( + "Command", cmd, "failed with exit status %d output -> \n%s" % ( + s, o + )) + +_share_cache = {} +class _Cache: + """An abstract class handles caching functionality, provides two + levels of caching, in-memory by share instances attributes among + each other and by store attributes into files. + + **Note**: + any attributes that start with ``_`` or ``conf_`` will be ignored. + + Parameters + ---------- + cache_path : str or None + The path of cache file, if None then cache in file will disabled. + + *factors : + The caching factors that need to utilize next to `conf_cache_factors`. + + Attributes + ---------- + cache_private : set + Hold the attributes that need be skipped from "in-memory cache". + + cache_infile : bool + Utilized during initializing this class, to determine if the cache was able + to loaded from the specified cache path in 'cache_path'. + """ + + # skip attributes from cache + _cache_ignore = re.compile("^(_|conf_)") + + def __init__(self, cache_path=None, *factors): + self.cache_me = {} + self.cache_private = set() + self.cache_infile = False + self._cache_path = None + + if self.conf_nocache: + self.dist_log("cache is disabled by `Config`") + return + + self._cache_hash = self.cache_hash(*factors, *self.conf_cache_factors) + self._cache_path = cache_path + if cache_path: + if os.path.exists(cache_path): + self.dist_log("load cache from file ->", cache_path) + cache_mod = self.dist_load_module("cache", cache_path) + if not cache_mod: + self.dist_log( + "unable to load the cache file as a module", + stderr=True + ) + elif not hasattr(cache_mod, "hash") or \ + not hasattr(cache_mod, "data"): + self.dist_log("invalid cache file", stderr=True) + elif self._cache_hash == cache_mod.hash: + self.dist_log("hit the file cache") + for attr, val in cache_mod.data.items(): + setattr(self, attr, val) + self.cache_infile = True + else: + self.dist_log("miss the file cache") + + if not self.cache_infile: + other_cache = _share_cache.get(self._cache_hash) + if other_cache: + self.dist_log("hit the memory cache") + for attr, val in other_cache.__dict__.items(): + if attr in other_cache.cache_private or \ + re.match(self._cache_ignore, attr): + continue + setattr(self, attr, val) + + _share_cache[self._cache_hash] = self + atexit.register(self.cache_flush) + + def __del__(self): + for h, o in _share_cache.items(): + if o == self: + _share_cache.pop(h) + break + + def cache_flush(self): + """ + Force update the cache. 
+ """ + if not self._cache_path: + return + # TODO: don't write if the cache doesn't change + self.dist_log("write cache to path ->", self._cache_path) + cdict = self.__dict__.copy() + for attr in self.__dict__.keys(): + if re.match(self._cache_ignore, attr): + cdict.pop(attr) + + d = os.path.dirname(self._cache_path) + if not os.path.exists(d): + os.makedirs(d) + + repr_dict = pprint.pformat(cdict, compact=True) + with open(self._cache_path, "w") as f: + f.write(textwrap.dedent("""\ + # AUTOGENERATED DON'T EDIT + # Please make changes to the code generator \ + (distutils/ccompiler_opt.py) + hash = {} + data = \\ + """).format(self._cache_hash)) + f.write(repr_dict) + + def cache_hash(self, *factors): + # is there a built-in non-crypto hash? + # sdbm + chash = 0 + for f in factors: + for char in str(f): + chash = ord(char) + (chash << 6) + (chash << 16) - chash + chash &= 0xFFFFFFFF + return chash + + @staticmethod + def me(cb): + """ + A static method that can be treated as a decorator to + dynamically cache certain methods. + """ + def cache_wrap_me(self, *args, **kwargs): + # good for normal args + cache_key = str(( + cb.__name__, *args, *kwargs.keys(), *kwargs.values() + )) + if cache_key in self.cache_me: + return self.cache_me[cache_key] + ccb = cb(self, *args, **kwargs) + self.cache_me[cache_key] = ccb + return ccb + return cache_wrap_me + +class _CCompiler: + """A helper class for `CCompilerOpt` containing all utilities that + related to the fundamental compiler's functions. + + Attributes + ---------- + cc_on_x86 : bool + True when the target architecture is 32-bit x86 + cc_on_x64 : bool + True when the target architecture is 64-bit x86 + cc_on_ppc64 : bool + True when the target architecture is 64-bit big-endian powerpc + cc_on_ppc64le : bool + True when the target architecture is 64-bit litle-endian powerpc + cc_on_s390x : bool + True when the target architecture is IBM/ZARCH on linux + cc_on_armhf : bool + True when the target architecture is 32-bit ARMv7+ + cc_on_aarch64 : bool + True when the target architecture is 64-bit Armv8-a+ + cc_on_noarch : bool + True when the target architecture is unknown or not supported + cc_is_gcc : bool + True if the compiler is GNU or + if the compiler is unknown + cc_is_clang : bool + True if the compiler is Clang + cc_is_icc : bool + True if the compiler is Intel compiler (unix like) + cc_is_iccw : bool + True if the compiler is Intel compiler (msvc like) + cc_is_nocc : bool + True if the compiler isn't supported directly, + Note: that cause a fail-back to gcc + cc_has_debug : bool + True if the compiler has debug flags + cc_has_native : bool + True if the compiler has native flags + cc_noopt : bool + True if the compiler has definition 'DISABLE_OPT*', + or 'cc_on_noarch' is True + cc_march : str + The target architecture name, or "unknown" if + the architecture isn't supported + cc_name : str + The compiler name, or "unknown" if the compiler isn't supported + cc_flags : dict + Dictionary containing the initialized flags of `_Config.conf_cc_flags` + """ + def __init__(self): + if hasattr(self, "cc_is_cached"): + return + # attr regex compiler-expression + detect_arch = ( + ("cc_on_x64", ".*(x|x86_|amd)64.*", ""), + ("cc_on_x86", ".*(win32|x86|i386|i686).*", ""), + ("cc_on_ppc64le", ".*(powerpc|ppc)64(el|le).*|.*powerpc.*", + "defined(__powerpc64__) && " + "defined(__LITTLE_ENDIAN__)"), + ("cc_on_ppc64", ".*(powerpc|ppc).*|.*powerpc.*", + "defined(__powerpc64__) && " + "defined(__BIG_ENDIAN__)"), + ("cc_on_aarch64", ".*(aarch64|arm64).*", ""), 
+ ("cc_on_armhf", ".*arm.*", "defined(__ARM_ARCH_7__) || " + "defined(__ARM_ARCH_7A__)"), + ("cc_on_s390x", ".*s390x.*", ""), + # undefined platform + ("cc_on_noarch", "", ""), + ) + detect_compiler = ( + ("cc_is_gcc", r".*(gcc|gnu\-g).*", ""), + ("cc_is_clang", ".*clang.*", ""), + # intel msvc like + ("cc_is_iccw", ".*(intelw|intelemw|iccw).*", ""), + ("cc_is_icc", ".*(intel|icc).*", ""), # intel unix like + ("cc_is_msvc", ".*msvc.*", ""), + ("cc_is_fcc", ".*fcc.*", ""), + # undefined compiler will be treat it as gcc + ("cc_is_nocc", "", ""), + ) + detect_args = ( + ("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*", ""), + ("cc_has_native", + ".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", ""), + # in case if the class run with -DNPY_DISABLE_OPTIMIZATION + ("cc_noopt", ".*DISABLE_OPT.*", ""), + ) + + dist_info = self.dist_info() + platform, compiler_info, extra_args = dist_info + # set False to all attrs + for section in (detect_arch, detect_compiler, detect_args): + for attr, rgex, cexpr in section: + setattr(self, attr, False) + + for detect, searchin in ((detect_arch, platform), (detect_compiler, compiler_info)): + for attr, rgex, cexpr in detect: + if rgex and not re.match(rgex, searchin, re.IGNORECASE): + continue + if cexpr and not self.cc_test_cexpr(cexpr): + continue + setattr(self, attr, True) + break + + for attr, rgex, cexpr in detect_args: + if rgex and not re.match(rgex, extra_args, re.IGNORECASE): + continue + if cexpr and not self.cc_test_cexpr(cexpr): + continue + setattr(self, attr, True) + + if self.cc_on_noarch: + self.dist_log( + "unable to detect CPU architecture which lead to disable the optimization. " + f"check dist_info:<<\n{dist_info}\n>>", + stderr=True + ) + self.cc_noopt = True + + if self.conf_noopt: + self.dist_log("Optimization is disabled by the Config", stderr=True) + self.cc_noopt = True + + if self.cc_is_nocc: + """ + mingw can be treated as a gcc, and also xlc even if it based on clang, + but still has the same gcc optimization flags. + """ + self.dist_log( + "unable to detect compiler type which leads to treating it as GCC. " + "this is a normal behavior if you're using gcc-like compiler such as MinGW or IBM/XLC." + f"check dist_info:<<\n{dist_info}\n>>", + stderr=True + ) + self.cc_is_gcc = True + + self.cc_march = "unknown" + for arch in ("x86", "x64", "ppc64", "ppc64le", + "armhf", "aarch64", "s390x"): + if getattr(self, "cc_on_" + arch): + self.cc_march = arch + break + + self.cc_name = "unknown" + for name in ("gcc", "clang", "iccw", "icc", "msvc", "fcc"): + if getattr(self, "cc_is_" + name): + self.cc_name = name + break + + self.cc_flags = {} + compiler_flags = self.conf_cc_flags.get(self.cc_name) + if compiler_flags is None: + self.dist_fatal( + "undefined flag for compiler '%s', " + "leave an empty dict instead" % self.cc_name + ) + for name, flags in compiler_flags.items(): + self.cc_flags[name] = nflags = [] + if flags: + assert(isinstance(flags, str)) + flags = flags.split() + for f in flags: + if self.cc_test_flags([f]): + nflags.append(f) + + self.cc_is_cached = True + + @_Cache.me + def cc_test_flags(self, flags): + """ + Returns True if the compiler supports 'flags'. 
+ """ + assert(isinstance(flags, list)) + self.dist_log("testing flags", flags) + test_path = os.path.join(self.conf_check_path, "test_flags.c") + test = self.dist_test(test_path, flags) + if not test: + self.dist_log("testing failed", stderr=True) + return test + + @_Cache.me + def cc_test_cexpr(self, cexpr, flags=[]): + """ + Same as the above but supports compile-time expressions. + """ + self.dist_log("testing compiler expression", cexpr) + test_path = os.path.join(self.conf_tmp_path, "npy_dist_test_cexpr.c") + with open(test_path, "w") as fd: + fd.write(textwrap.dedent(f"""\ + #if !({cexpr}) + #error "unsupported expression" + #endif + int dummy; + """)) + test = self.dist_test(test_path, flags) + if not test: + self.dist_log("testing failed", stderr=True) + return test + + def cc_normalize_flags(self, flags): + """ + Remove the conflicts that caused due gathering implied features flags. + + Parameters + ---------- + 'flags' list, compiler flags + flags should be sorted from the lowest to the highest interest. + + Returns + ------- + list, filtered from any conflicts. + + Examples + -------- + >>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod']) + ['armv8.2-a+fp16+dotprod'] + + >>> self.cc_normalize_flags( + ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2'] + ) + ['-march=core-avx2'] + """ + assert(isinstance(flags, list)) + if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc: + return self._cc_normalize_unix(flags) + + if self.cc_is_msvc or self.cc_is_iccw: + return self._cc_normalize_win(flags) + return flags + + _cc_normalize_unix_mrgx = re.compile( + # 1- to check the highest of + r"^(-mcpu=|-march=|-x[A-Z0-9\-])" + ) + _cc_normalize_unix_frgx = re.compile( + # 2- to remove any flags starts with + # -march, -mcpu, -x(INTEL) and '-m' without '=' + r"^(?!(-mcpu=|-march=|-x[A-Z0-9\-]|-m[a-z0-9\-\.]*.$))|" + # exclude: + r"(?:-mzvector)" + ) + _cc_normalize_unix_krgx = re.compile( + # 3- keep only the highest of + r"^(-mfpu|-mtune)" + ) + _cc_normalize_arch_ver = re.compile( + r"[0-9.]" + ) + def _cc_normalize_unix(self, flags): + def ver_flags(f): + # arch ver subflag + # -march=armv8.2-a+fp16fml + tokens = f.split('+') + ver = float('0' + ''.join( + re.findall(self._cc_normalize_arch_ver, tokens[0]) + )) + return ver, tokens[0], tokens[1:] + + if len(flags) <= 1: + return flags + # get the highest matched flag + for i, cur_flag in enumerate(reversed(flags)): + if not re.match(self._cc_normalize_unix_mrgx, cur_flag): + continue + lower_flags = flags[:-(i+1)] + upper_flags = flags[-i:] + filtered = list(filter( + self._cc_normalize_unix_frgx.search, lower_flags + )) + # gather subflags + ver, arch, subflags = ver_flags(cur_flag) + if ver > 0 and len(subflags) > 0: + for xflag in lower_flags: + xver, _, xsubflags = ver_flags(xflag) + if ver == xver: + subflags = xsubflags + subflags + cur_flag = arch + '+' + '+'.join(subflags) + + flags = filtered + [cur_flag] + if i > 0: + flags += upper_flags + break + + # to remove overridable flags + final_flags = [] + matched = set() + for f in reversed(flags): + match = re.match(self._cc_normalize_unix_krgx, f) + if not match: + pass + elif match[0] in matched: + continue + else: + matched.add(match[0]) + final_flags.insert(0, f) + return final_flags + + _cc_normalize_win_frgx = re.compile( + r"^(?!(/arch\:|/Qx\:))" + ) + _cc_normalize_win_mrgx = re.compile( + r"^(/arch|/Qx:)" + ) + def _cc_normalize_win(self, flags): + for i, f in enumerate(reversed(flags)): + if not 
re.match(self._cc_normalize_win_mrgx, f): + continue + i += 1 + return list(filter( + self._cc_normalize_win_frgx.search, flags[:-i] + )) + flags[-i:] + return flags + +class _Feature: + """A helper class for `CCompilerOpt` that managing CPU features. + + Attributes + ---------- + feature_supported : dict + Dictionary containing all CPU features that supported + by the platform, according to the specified values in attribute + `_Config.conf_features` and `_Config.conf_features_partial()` + + feature_min : set + The minimum support of CPU features, according to + the specified values in attribute `_Config.conf_min_features`. + """ + def __init__(self): + if hasattr(self, "feature_is_cached"): + return + self.feature_supported = pfeatures = self.conf_features_partial() + for feature_name in list(pfeatures.keys()): + feature = pfeatures[feature_name] + cfeature = self.conf_features[feature_name] + feature.update({ + k:v for k,v in cfeature.items() if k not in feature + }) + disabled = feature.get("disable") + if disabled is not None: + pfeatures.pop(feature_name) + self.dist_log( + "feature '%s' is disabled," % feature_name, + disabled, stderr=True + ) + continue + # list is used internally for these options + for option in ( + "implies", "group", "detect", "headers", "flags", "extra_checks" + ) : + oval = feature.get(option) + if isinstance(oval, str): + feature[option] = oval.split() + + self.feature_min = set() + min_f = self.conf_min_features.get(self.cc_march, "") + for F in min_f.upper().split(): + if F in self.feature_supported: + self.feature_min.add(F) + + self.feature_is_cached = True + + def feature_names(self, names=None, force_flags=None, macros=[]): + """ + Returns a set of CPU feature names that supported by platform and the **C** compiler. + + Parameters + ---------- + names : sequence or None, optional + Specify certain CPU features to test it against the **C** compiler. + if None(default), it will test all current supported features. + **Note**: feature names must be in upper-case. + + force_flags : list or None, optional + If None(default), default compiler flags for every CPU feature will + be used during the test. + + macros : list of tuples, optional + A list of C macro definitions. + """ + assert( + names is None or ( + not isinstance(names, str) and + hasattr(names, "__iter__") + ) + ) + assert(force_flags is None or isinstance(force_flags, list)) + if names is None: + names = self.feature_supported.keys() + supported_names = set() + for f in names: + if self.feature_is_supported( + f, force_flags=force_flags, macros=macros + ): + supported_names.add(f) + return supported_names + + def feature_is_exist(self, name): + """ + Returns True if a certain feature is exist and covered within + ``_Config.conf_features``. + + Parameters + ---------- + 'name': str + feature name in uppercase. + """ + assert(name.isupper()) + return name in self.conf_features + + def feature_sorted(self, names, reverse=False): + """ + Sort a list of CPU features ordered by the lowest interest. + + Parameters + ---------- + 'names': sequence + sequence of supported feature names in uppercase. + 'reverse': bool, optional + If true, the sorted features is reversed. 
(highest interest) + + Returns + ------- + list, sorted CPU features + """ + def sort_cb(k): + if isinstance(k, str): + return self.feature_supported[k]["interest"] + # multiple features + rank = max([self.feature_supported[f]["interest"] for f in k]) + # FIXME: that's not a safe way to increase the rank for + # multi targets + rank += len(k) -1 + return rank + return sorted(names, reverse=reverse, key=sort_cb) + + def feature_implies(self, names, keep_origins=False): + """ + Return a set of CPU features that implied by 'names' + + Parameters + ---------- + names : str or sequence of str + CPU feature name(s) in uppercase. + + keep_origins : bool + if False(default) then the returned set will not contain any + features from 'names'. This case happens only when two features + imply each other. + + Examples + -------- + >>> self.feature_implies("SSE3") + {'SSE', 'SSE2'} + >>> self.feature_implies("SSE2") + {'SSE'} + >>> self.feature_implies("SSE2", keep_origins=True) + # 'SSE2' found here since 'SSE' and 'SSE2' imply each other + {'SSE', 'SSE2'} + """ + def get_implies(name, _caller=set()): + implies = set() + d = self.feature_supported[name] + for i in d.get("implies", []): + implies.add(i) + if i in _caller: + # infinity recursive guard since + # features can imply each other + continue + _caller.add(name) + implies = implies.union(get_implies(i, _caller)) + return implies + + if isinstance(names, str): + implies = get_implies(names) + names = [names] + else: + assert(hasattr(names, "__iter__")) + implies = set() + for n in names: + implies = implies.union(get_implies(n)) + if not keep_origins: + implies.difference_update(names) + return implies + + def feature_implies_c(self, names): + """same as feature_implies() but combining 'names'""" + if isinstance(names, str): + names = set((names,)) + else: + names = set(names) + return names.union(self.feature_implies(names)) + + def feature_ahead(self, names): + """ + Return list of features in 'names' after remove any + implied features and keep the origins. + + Parameters + ---------- + 'names': sequence + sequence of CPU feature names in uppercase. + + Returns + ------- + list of CPU features sorted as-is 'names' + + Examples + -------- + >>> self.feature_ahead(["SSE2", "SSE3", "SSE41"]) + ["SSE41"] + # assume AVX2 and FMA3 implies each other and AVX2 + # is the highest interest + >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"]) + ["AVX2"] + # assume AVX2 and FMA3 don't implies each other + >>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"]) + ["AVX2", "FMA3"] + """ + assert( + not isinstance(names, str) + and hasattr(names, '__iter__') + ) + implies = self.feature_implies(names, keep_origins=True) + ahead = [n for n in names if n not in implies] + if len(ahead) == 0: + # return the highest interested feature + # if all features imply each other + ahead = self.feature_sorted(names, reverse=True)[:1] + return ahead + + def feature_untied(self, names): + """ + same as 'feature_ahead()' but if both features implied each other + and keep the highest interest. + + Parameters + ---------- + 'names': sequence + sequence of CPU feature names in uppercase. 
+ + Returns + ------- + list of CPU features sorted as-is 'names' + + Examples + -------- + >>> self.feature_untied(["SSE2", "SSE3", "SSE41"]) + ["SSE2", "SSE3", "SSE41"] + # assume AVX2 and FMA3 implies each other + >>> self.feature_untied(["SSE2", "SSE3", "SSE41", "FMA3", "AVX2"]) + ["SSE2", "SSE3", "SSE41", "AVX2"] + """ + assert( + not isinstance(names, str) + and hasattr(names, '__iter__') + ) + final = [] + for n in names: + implies = self.feature_implies(n) + tied = [ + nn for nn in final + if nn in implies and n in self.feature_implies(nn) + ] + if tied: + tied = self.feature_sorted(tied + [n]) + if n not in tied[1:]: + continue + final.remove(tied[:1][0]) + final.append(n) + return final + + def feature_get_til(self, names, keyisfalse): + """ + same as `feature_implies_c()` but stop collecting implied + features when feature's option that provided through + parameter 'keyisfalse' is False, also sorting the returned + features. + """ + def til(tnames): + # sort from highest to lowest interest then cut if "key" is False + tnames = self.feature_implies_c(tnames) + tnames = self.feature_sorted(tnames, reverse=True) + for i, n in enumerate(tnames): + if not self.feature_supported[n].get(keyisfalse, True): + tnames = tnames[:i+1] + break + return tnames + + if isinstance(names, str) or len(names) <= 1: + names = til(names) + # normalize the sort + names.reverse() + return names + + names = self.feature_ahead(names) + names = {t for n in names for t in til(n)} + return self.feature_sorted(names) + + def feature_detect(self, names): + """ + Return a list of CPU features that required to be detected + sorted from the lowest to highest interest. + """ + names = self.feature_get_til(names, "implies_detect") + detect = [] + for n in names: + d = self.feature_supported[n] + detect += d.get("detect", d.get("group", [n])) + return detect + + @_Cache.me + def feature_flags(self, names): + """ + Return a list of CPU features flags sorted from the lowest + to highest interest. + """ + names = self.feature_sorted(self.feature_implies_c(names)) + flags = [] + for n in names: + d = self.feature_supported[n] + f = d.get("flags", []) + if not f or not self.cc_test_flags(f): + continue + flags += f + return self.cc_normalize_flags(flags) + + @_Cache.me + def feature_test(self, name, force_flags=None, macros=[]): + """ + Test a certain CPU feature against the compiler through its own + check file. + + Parameters + ---------- + name : str + Supported CPU feature name. + + force_flags : list or None, optional + If None(default), the returned flags from `feature_flags()` + will be used. + + macros : list of tuples, optional + A list of C macro definitions. + """ + if force_flags is None: + force_flags = self.feature_flags(name) + + self.dist_log( + "testing feature '%s' with flags (%s)" % ( + name, ' '.join(force_flags) + )) + # Each CPU feature must have C source code contains at + # least one intrinsic or instruction related to this feature. + test_path = os.path.join( + self.conf_check_path, "cpu_%s.c" % name.lower() + ) + if not os.path.exists(test_path): + self.dist_fatal("feature test file is not exist", test_path) + + test = self.dist_test( + test_path, force_flags + self.cc_flags["werror"], macros=macros + ) + if not test: + self.dist_log("testing failed", stderr=True) + return test + + @_Cache.me + def feature_is_supported(self, name, force_flags=None, macros=[]): + """ + Check if a certain CPU feature is supported by the platform and compiler. 
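+ + A feature only counts as supported if its own test file and the test + files of every feature it implies compile successfully (see + `feature_test()`).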
+ + Parameters + ---------- + name : str + CPU feature name in uppercase. + + force_flags : list or None, optional + If None(default), default compiler flags for every CPU feature will + be used during test. + + macros : list of tuples, optional + A list of C macro definitions. + """ + assert(name.isupper()) + assert(force_flags is None or isinstance(force_flags, list)) + + supported = name in self.feature_supported + if supported: + for impl in self.feature_implies(name): + if not self.feature_test(impl, force_flags, macros=macros): + return False + if not self.feature_test(name, force_flags, macros=macros): + return False + return supported + + @_Cache.me + def feature_can_autovec(self, name): + """ + check if the feature can be auto-vectorized by the compiler + """ + assert(isinstance(name, str)) + d = self.feature_supported[name] + can = d.get("autovec", None) + if can is None: + valid_flags = [ + self.cc_test_flags([f]) for f in d.get("flags", []) + ] + can = valid_flags and any(valid_flags) + return can + + @_Cache.me + def feature_extra_checks(self, name): + """ + Return a list of supported extra checks after testing them against + the compiler. + + Parameters + ---------- + names : str + CPU feature name in uppercase. + """ + assert isinstance(name, str) + d = self.feature_supported[name] + extra_checks = d.get("extra_checks", []) + if not extra_checks: + return [] + + self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks) + flags = self.feature_flags(name) + available = [] + not_available = [] + for chk in extra_checks: + test_path = os.path.join( + self.conf_check_path, "extra_%s.c" % chk.lower() + ) + if not os.path.exists(test_path): + self.dist_fatal("extra check file does not exist", test_path) + + is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"]) + if is_supported: + available.append(chk) + else: + not_available.append(chk) + + if not_available: + self.dist_log("testing failed for checks", not_available, stderr=True) + return available + + + def feature_c_preprocessor(self, feature_name, tabs=0): + """ + Generate C preprocessor definitions and include headers of a CPU feature. + + Parameters + ---------- + 'feature_name': str + CPU feature name in uppercase. + 'tabs': int + if > 0, align the generated strings to the right depend on number of tabs. + + Returns + ------- + str, generated C preprocessor + + Examples + -------- + >>> self.feature_c_preprocessor("SSE3") + /** SSE3 **/ + #define NPY_HAVE_SSE3 1 + #include + """ + assert(feature_name.isupper()) + feature = self.feature_supported.get(feature_name) + assert(feature is not None) + + prepr = [ + "/** %s **/" % feature_name, + "#define %sHAVE_%s 1" % (self.conf_c_prefix, feature_name) + ] + prepr += [ + "#include <%s>" % h for h in feature.get("headers", []) + ] + + extra_defs = feature.get("group", []) + extra_defs += self.feature_extra_checks(feature_name) + for edef in extra_defs: + # Guard extra definitions in case of duplicate with + # another feature + prepr += [ + "#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef), + "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef), + "#endif", + ] + + if tabs > 0: + prepr = [('\t'*tabs) + l for l in prepr] + return '\n'.join(prepr) + +class _Parse: + """A helper class that parsing main arguments of `CCompilerOpt`, + also parsing configuration statements in dispatch-able sources. + + Parameters + ---------- + cpu_baseline : str or None + minimal set of required CPU features or special options. 
+ + cpu_dispatch : str or None + dispatched set of additional CPU features or special options. + + Special options can be: + - **MIN**: Enables the minimum CPU features that utilized via `_Config.conf_min_features` + - **MAX**: Enables all supported CPU features by the Compiler and platform. + - **NATIVE**: Enables all CPU features that supported by the current machine. + - **NONE**: Enables nothing + - **Operand +/-**: remove or add features, useful with options **MAX**, **MIN** and **NATIVE**. + NOTE: operand + is only added for nominal reason. + + NOTES: + - Case-insensitive among all CPU features and special options. + - Comma or space can be used as a separator. + - If the CPU feature is not supported by the user platform or compiler, + it will be skipped rather than raising a fatal error. + - Any specified CPU features to 'cpu_dispatch' will be skipped if its part of CPU baseline features + - 'cpu_baseline' force enables implied features. + + Attributes + ---------- + parse_baseline_names : list + Final CPU baseline's feature names(sorted from low to high) + parse_baseline_flags : list + Compiler flags of baseline features + parse_dispatch_names : list + Final CPU dispatch-able feature names(sorted from low to high) + parse_target_groups : dict + Dictionary containing initialized target groups that configured + through class attribute `conf_target_groups`. + + The key is represent the group name and value is a tuple + contains three items : + - bool, True if group has the 'baseline' option. + - list, list of CPU features. + - list, list of extra compiler flags. + + """ + def __init__(self, cpu_baseline, cpu_dispatch): + self._parse_policies = dict( + # POLICY NAME, (HAVE, NOT HAVE, [DEB]) + KEEP_BASELINE = ( + None, self._parse_policy_not_keepbase, + [] + ), + KEEP_SORT = ( + self._parse_policy_keepsort, + self._parse_policy_not_keepsort, + [] + ), + MAXOPT = ( + self._parse_policy_maxopt, None, + [] + ), + WERROR = ( + self._parse_policy_werror, None, + [] + ), + AUTOVEC = ( + self._parse_policy_autovec, None, + ["MAXOPT"] + ) + ) + if hasattr(self, "parse_is_cached"): + return + + self.parse_baseline_names = [] + self.parse_baseline_flags = [] + self.parse_dispatch_names = [] + self.parse_target_groups = {} + + if self.cc_noopt: + # skip parsing baseline and dispatch args and keep parsing target groups + cpu_baseline = cpu_dispatch = None + + self.dist_log("check requested baseline") + if cpu_baseline is not None: + cpu_baseline = self._parse_arg_features("cpu_baseline", cpu_baseline) + baseline_names = self.feature_names(cpu_baseline) + self.parse_baseline_flags = self.feature_flags(baseline_names) + self.parse_baseline_names = self.feature_sorted( + self.feature_implies_c(baseline_names) + ) + + self.dist_log("check requested dispatch-able features") + if cpu_dispatch is not None: + cpu_dispatch_ = self._parse_arg_features("cpu_dispatch", cpu_dispatch) + cpu_dispatch = { + f for f in cpu_dispatch_ + if f not in self.parse_baseline_names + } + conflict_baseline = cpu_dispatch_.difference(cpu_dispatch) + self.parse_dispatch_names = self.feature_sorted( + self.feature_names(cpu_dispatch) + ) + if len(conflict_baseline) > 0: + self.dist_log( + "skip features", conflict_baseline, "since its part of baseline" + ) + + self.dist_log("initialize targets groups") + for group_name, tokens in self.conf_target_groups.items(): + self.dist_log("parse target group", group_name) + GROUP_NAME = group_name.upper() + if not tokens or not tokens.strip(): + # allow empty groups, useful in case if 
there's a need + # to disable certain group since '_parse_target_tokens()' + # requires at least one valid target + self.parse_target_groups[GROUP_NAME] = ( + False, [], [] + ) + continue + has_baseline, features, extra_flags = \ + self._parse_target_tokens(tokens) + self.parse_target_groups[GROUP_NAME] = ( + has_baseline, features, extra_flags + ) + + self.parse_is_cached = True + + def parse_targets(self, source): + """ + Fetch and parse configuration statements that required for + defining the targeted CPU features, statements should be declared + in the top of source in between **C** comment and start + with a special mark **@targets**. + + Configuration statements are sort of keywords representing + CPU features names, group of statements and policies, combined + together to determine the required optimization. + + Parameters + ---------- + source : str + the path of **C** source file. + + Returns + ------- + - bool, True if group has the 'baseline' option + - list, list of CPU features + - list, list of extra compiler flags + """ + self.dist_log("looking for '@targets' inside -> ", source) + # get lines between /*@targets and */ + with open(source) as fd: + tokens = "" + max_to_reach = 1000 # good enough, isn't? + start_with = "@targets" + start_pos = -1 + end_with = "*/" + end_pos = -1 + for current_line, line in enumerate(fd): + if current_line == max_to_reach: + self.dist_fatal("reached the max of lines") + break + if start_pos == -1: + start_pos = line.find(start_with) + if start_pos == -1: + continue + start_pos += len(start_with) + tokens += line + end_pos = line.find(end_with) + if end_pos != -1: + end_pos += len(tokens) - len(line) + break + + if start_pos == -1: + self.dist_fatal("expected to find '%s' within a C comment" % start_with) + if end_pos == -1: + self.dist_fatal("expected to end with '%s'" % end_with) + + tokens = tokens[start_pos:end_pos] + return self._parse_target_tokens(tokens) + + _parse_regex_arg = re.compile(r'\s|,|([+-])') + def _parse_arg_features(self, arg_name, req_features): + if not isinstance(req_features, str): + self.dist_fatal("expected a string in '%s'" % arg_name) + + final_features = set() + # space and comma can be used as a separator + tokens = list(filter(None, re.split(self._parse_regex_arg, req_features))) + append = True # append is the default + for tok in tokens: + if tok[0] in ("#", "$"): + self.dist_fatal( + arg_name, "target groups and policies " + "aren't allowed from arguments, " + "only from dispatch-able sources" + ) + if tok == '+': + append = True + continue + if tok == '-': + append = False + continue + + TOK = tok.upper() # we use upper-case internally + features_to = set() + if TOK == "NONE": + pass + elif TOK == "NATIVE": + native = self.cc_flags["native"] + if not native: + self.dist_fatal(arg_name, + "native option isn't supported by the compiler" + ) + features_to = self.feature_names( + force_flags=native, macros=[("DETECT_FEATURES", 1)] + ) + elif TOK == "MAX": + features_to = self.feature_supported.keys() + elif TOK == "MIN": + features_to = self.feature_min + else: + if TOK in self.feature_supported: + features_to.add(TOK) + else: + if not self.feature_is_exist(TOK): + self.dist_fatal(arg_name, + ", '%s' isn't a known feature or option" % tok + ) + if append: + final_features = final_features.union(features_to) + else: + final_features = final_features.difference(features_to) + + append = True # back to default + + return final_features + + _parse_regex_target = re.compile(r'\s|[*,/]|([()])') + def 
_parse_target_tokens(self, tokens): + assert(isinstance(tokens, str)) + final_targets = [] # to keep it sorted as specified + extra_flags = [] + has_baseline = False + + skipped = set() + policies = set() + multi_target = None + + tokens = list(filter(None, re.split(self._parse_regex_target, tokens))) + if not tokens: + self.dist_fatal("expected one token at least") + + for tok in tokens: + TOK = tok.upper() + ch = tok[0] + if ch in ('+', '-'): + self.dist_fatal( + "+/- are 'not' allowed from target's groups or @targets, " + "only from cpu_baseline and cpu_dispatch parms" + ) + elif ch == '$': + if multi_target is not None: + self.dist_fatal( + "policies aren't allowed inside multi-target '()'" + ", only CPU features" + ) + policies.add(self._parse_token_policy(TOK)) + elif ch == '#': + if multi_target is not None: + self.dist_fatal( + "target groups aren't allowed inside multi-target '()'" + ", only CPU features" + ) + has_baseline, final_targets, extra_flags = \ + self._parse_token_group(TOK, has_baseline, final_targets, extra_flags) + elif ch == '(': + if multi_target is not None: + self.dist_fatal("unclosed multi-target, missing ')'") + multi_target = set() + elif ch == ')': + if multi_target is None: + self.dist_fatal("multi-target opener '(' wasn't found") + targets = self._parse_multi_target(multi_target) + if targets is None: + skipped.add(tuple(multi_target)) + else: + if len(targets) == 1: + targets = targets[0] + if targets and targets not in final_targets: + final_targets.append(targets) + multi_target = None # back to default + else: + if TOK == "BASELINE": + if multi_target is not None: + self.dist_fatal("baseline isn't allowed inside multi-target '()'") + has_baseline = True + continue + + if multi_target is not None: + multi_target.add(TOK) + continue + + if not self.feature_is_exist(TOK): + self.dist_fatal("invalid target name '%s'" % TOK) + + is_enabled = ( + TOK in self.parse_baseline_names or + TOK in self.parse_dispatch_names + ) + if is_enabled: + if TOK not in final_targets: + final_targets.append(TOK) + continue + + skipped.add(TOK) + + if multi_target is not None: + self.dist_fatal("unclosed multi-target, missing ')'") + if skipped: + self.dist_log( + "skip targets", skipped, + "not part of baseline or dispatch-able features" + ) + + final_targets = self.feature_untied(final_targets) + + # add polices dependencies + for p in list(policies): + _, _, deps = self._parse_policies[p] + for d in deps: + if d in policies: + continue + self.dist_log( + "policy '%s' force enables '%s'" % ( + p, d + )) + policies.add(d) + + # release policies filtrations + for p, (have, nhave, _) in self._parse_policies.items(): + func = None + if p in policies: + func = have + self.dist_log("policy '%s' is ON" % p) + else: + func = nhave + if not func: + continue + has_baseline, final_targets, extra_flags = func( + has_baseline, final_targets, extra_flags + ) + + return has_baseline, final_targets, extra_flags + + def _parse_token_policy(self, token): + """validate policy token""" + if len(token) <= 1 or token[-1:] == token[0]: + self.dist_fatal("'$' must stuck in the begin of policy name") + token = token[1:] + if token not in self._parse_policies: + self.dist_fatal( + "'%s' is an invalid policy name, available policies are" % token, + self._parse_policies.keys() + ) + return token + + def _parse_token_group(self, token, has_baseline, final_targets, extra_flags): + """validate group token""" + if len(token) <= 1 or token[-1:] == token[0]: + self.dist_fatal("'#' must stuck in the begin of 
group name") + + token = token[1:] + ghas_baseline, gtargets, gextra_flags = self.parse_target_groups.get( + token, (False, None, []) + ) + if gtargets is None: + self.dist_fatal( + "'%s' is an invalid target group name, " % token + \ + "available target groups are", + self.parse_target_groups.keys() + ) + if ghas_baseline: + has_baseline = True + # always keep sorting as specified + final_targets += [f for f in gtargets if f not in final_targets] + extra_flags += [f for f in gextra_flags if f not in extra_flags] + return has_baseline, final_targets, extra_flags + + def _parse_multi_target(self, targets): + """validate multi targets that defined between parentheses()""" + # remove any implied features and keep the origins + if not targets: + self.dist_fatal("empty multi-target '()'") + if not all([ + self.feature_is_exist(tar) for tar in targets + ]) : + self.dist_fatal("invalid target name in multi-target", targets) + if not all([ + ( + tar in self.parse_baseline_names or + tar in self.parse_dispatch_names + ) + for tar in targets + ]) : + return None + targets = self.feature_ahead(targets) + if not targets: + return None + # force sort multi targets, so it can be comparable + targets = self.feature_sorted(targets) + targets = tuple(targets) # hashable + return targets + + def _parse_policy_not_keepbase(self, has_baseline, final_targets, extra_flags): + """skip all baseline features""" + skipped = [] + for tar in final_targets[:]: + is_base = False + if isinstance(tar, str): + is_base = tar in self.parse_baseline_names + else: + # multi targets + is_base = all([ + f in self.parse_baseline_names + for f in tar + ]) + if is_base: + skipped.append(tar) + final_targets.remove(tar) + + if skipped: + self.dist_log("skip baseline features", skipped) + + return has_baseline, final_targets, extra_flags + + def _parse_policy_keepsort(self, has_baseline, final_targets, extra_flags): + """leave a notice that $keep_sort is on""" + self.dist_log( + "policy 'keep_sort' is on, dispatch-able targets", final_targets, "\n" + "are 'not' sorted depend on the highest interest but" + "as specified in the dispatch-able source or the extra group" + ) + return has_baseline, final_targets, extra_flags + + def _parse_policy_not_keepsort(self, has_baseline, final_targets, extra_flags): + """sorted depend on the highest interest""" + final_targets = self.feature_sorted(final_targets, reverse=True) + return has_baseline, final_targets, extra_flags + + def _parse_policy_maxopt(self, has_baseline, final_targets, extra_flags): + """append the compiler optimization flags""" + if self.cc_has_debug: + self.dist_log("debug mode is detected, policy 'maxopt' is skipped.") + elif self.cc_noopt: + self.dist_log("optimization is disabled, policy 'maxopt' is skipped.") + else: + flags = self.cc_flags["opt"] + if not flags: + self.dist_log( + "current compiler doesn't support optimization flags, " + "policy 'maxopt' is skipped", stderr=True + ) + else: + extra_flags += flags + return has_baseline, final_targets, extra_flags + + def _parse_policy_werror(self, has_baseline, final_targets, extra_flags): + """force warnings to treated as errors""" + flags = self.cc_flags["werror"] + if not flags: + self.dist_log( + "current compiler doesn't support werror flags, " + "warnings will 'not' treated as errors", stderr=True + ) + else: + self.dist_log("compiler warnings are treated as errors") + extra_flags += flags + return has_baseline, final_targets, extra_flags + + def _parse_policy_autovec(self, has_baseline, final_targets, extra_flags): 
+ """skip features that has no auto-vectorized support by compiler""" + skipped = [] + for tar in final_targets[:]: + if isinstance(tar, str): + can = self.feature_can_autovec(tar) + else: # multiple target + can = all([ + self.feature_can_autovec(t) + for t in tar + ]) + if not can: + final_targets.remove(tar) + skipped.append(tar) + + if skipped: + self.dist_log("skip non auto-vectorized features", skipped) + + return has_baseline, final_targets, extra_flags + +class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse): + """ + A helper class for `CCompiler` aims to provide extra build options + to effectively control of compiler optimizations that are directly + related to CPU features. + """ + def __init__(self, ccompiler, cpu_baseline="min", cpu_dispatch="max", cache_path=None): + _Config.__init__(self) + _Distutils.__init__(self, ccompiler) + _Cache.__init__(self, cache_path, self.dist_info(), cpu_baseline, cpu_dispatch) + _CCompiler.__init__(self) + _Feature.__init__(self) + if not self.cc_noopt and self.cc_has_native: + self.dist_log( + "native flag is specified through environment variables. " + "force cpu-baseline='native'" + ) + cpu_baseline = "native" + _Parse.__init__(self, cpu_baseline, cpu_dispatch) + # keep the requested features untouched, need it later for report + # and trace purposes + self._requested_baseline = cpu_baseline + self._requested_dispatch = cpu_dispatch + # key is the dispatch-able source and value is a tuple + # contains two items (has_baseline[boolean], dispatched-features[list]) + self.sources_status = getattr(self, "sources_status", {}) + # every instance should has a separate one + self.cache_private.add("sources_status") + # set it at the end to make sure the cache writing was done after init + # this class + self.hit_cache = hasattr(self, "hit_cache") + + def is_cached(self): + """ + Returns True if the class loaded from the cache file + """ + return self.cache_infile and self.hit_cache + + def cpu_baseline_flags(self): + """ + Returns a list of final CPU baseline compiler flags + """ + return self.parse_baseline_flags + + def cpu_baseline_names(self): + """ + return a list of final CPU baseline feature names + """ + return self.parse_baseline_names + + def cpu_dispatch_names(self): + """ + return a list of final CPU dispatch feature names + """ + return self.parse_dispatch_names + + def try_dispatch(self, sources, src_dir=None, ccompiler=None, **kwargs): + """ + Compile one or more dispatch-able sources and generates object files, + also generates abstract C config headers and macros that + used later for the final runtime dispatching process. + + The mechanism behind it is to takes each source file that specified + in 'sources' and branching it into several files depend on + special configuration statements that must be declared in the + top of each source which contains targeted CPU features, + then it compiles every branched source with the proper compiler flags. + + Parameters + ---------- + sources : list + Must be a list of dispatch-able sources file paths, + and configuration statements must be declared inside + each file. + + src_dir : str + Path of parent directory for the generated headers and wrapped sources. + If None(default) the files will generated in-place. + + ccompiler : CCompiler + Distutils `CCompiler` instance to be used for compilation. + If None (default), the provided instance during the initialization + will be used instead. 
+ + **kwargs : any + Arguments to pass on to the `CCompiler.compile()` + + Returns + ------- + list : generated object files + + Raises + ------ + CompileError + Raises by `CCompiler.compile()` on compiling failure. + DistutilsError + Some errors during checking the sanity of configuration statements. + + See Also + -------- + parse_targets : + Parsing the configuration statements of dispatch-able sources. + """ + to_compile = {} + baseline_flags = self.cpu_baseline_flags() + include_dirs = kwargs.setdefault("include_dirs", []) + + for src in sources: + output_dir = os.path.dirname(src) + if src_dir: + if not output_dir.startswith(src_dir): + output_dir = os.path.join(src_dir, output_dir) + if output_dir not in include_dirs: + # To allow including the generated config header(*.dispatch.h) + # by the dispatch-able sources + include_dirs.append(output_dir) + + has_baseline, targets, extra_flags = self.parse_targets(src) + nochange = self._generate_config(output_dir, src, targets, has_baseline) + for tar in targets: + tar_src = self._wrap_target(output_dir, src, tar, nochange=nochange) + flags = tuple(extra_flags + self.feature_flags(tar)) + to_compile.setdefault(flags, []).append(tar_src) + + if has_baseline: + flags = tuple(extra_flags + baseline_flags) + to_compile.setdefault(flags, []).append(src) + + self.sources_status[src] = (has_baseline, targets) + + # For these reasons, the sources are compiled in a separate loop: + # - Gathering all sources with the same flags to benefit from + # the parallel compiling as much as possible. + # - To generate all config headers of the dispatchable sources, + # before the compilation in case if there are dependency relationships + # among them. + objects = [] + for flags, srcs in to_compile.items(): + objects += self.dist_compile( + srcs, list(flags), ccompiler=ccompiler, **kwargs + ) + return objects + + def generate_dispatch_header(self, header_path): + """ + Generate the dispatch header which contains the #definitions and headers + for platform-specific instruction-sets for the enabled CPU baseline and + dispatch-able features. + + Its highly recommended to take a look at the generated header + also the generated source files via `try_dispatch()` + in order to get the full picture. + """ + self.dist_log("generate CPU dispatch header: (%s)" % header_path) + + baseline_names = self.cpu_baseline_names() + dispatch_names = self.cpu_dispatch_names() + baseline_len = len(baseline_names) + dispatch_len = len(dispatch_names) + + header_dir = os.path.dirname(header_path) + if not os.path.exists(header_dir): + self.dist_log( + f"dispatch header dir {header_dir} does not exist, creating it", + stderr=True + ) + os.makedirs(header_dir) + + with open(header_path, 'w') as f: + baseline_calls = ' \\\n'.join([ + ( + "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))" + ) % (self.conf_c_prefix, f) + for f in baseline_names + ]) + dispatch_calls = ' \\\n'.join([ + ( + "\t%sWITH_CPU_EXPAND_(MACRO_TO_CALL(%s, __VA_ARGS__))" + ) % (self.conf_c_prefix, f) + for f in dispatch_names + ]) + f.write(textwrap.dedent("""\ + /* + * AUTOGENERATED DON'T EDIT + * Please make changes to the code generator (distutils/ccompiler_opt.py) + */ + #define {pfx}WITH_CPU_BASELINE "{baseline_str}" + #define {pfx}WITH_CPU_DISPATCH "{dispatch_str}" + #define {pfx}WITH_CPU_BASELINE_N {baseline_len} + #define {pfx}WITH_CPU_DISPATCH_N {dispatch_len} + #define {pfx}WITH_CPU_EXPAND_(X) X + #define {pfx}WITH_CPU_BASELINE_CALL(MACRO_TO_CALL, ...) 
\\ + {baseline_calls} + #define {pfx}WITH_CPU_DISPATCH_CALL(MACRO_TO_CALL, ...) \\ + {dispatch_calls} + """).format( + pfx=self.conf_c_prefix, baseline_str=" ".join(baseline_names), + dispatch_str=" ".join(dispatch_names), baseline_len=baseline_len, + dispatch_len=dispatch_len, baseline_calls=baseline_calls, + dispatch_calls=dispatch_calls + )) + baseline_pre = '' + for name in baseline_names: + baseline_pre += self.feature_c_preprocessor(name, tabs=1) + '\n' + + dispatch_pre = '' + for name in dispatch_names: + dispatch_pre += textwrap.dedent("""\ + #ifdef {pfx}CPU_TARGET_{name} + {pre} + #endif /*{pfx}CPU_TARGET_{name}*/ + """).format( + pfx=self.conf_c_prefix_, name=name, pre=self.feature_c_preprocessor( + name, tabs=1 + )) + + f.write(textwrap.dedent("""\ + /******* baseline features *******/ + {baseline_pre} + /******* dispatch features *******/ + {dispatch_pre} + """).format( + pfx=self.conf_c_prefix_, baseline_pre=baseline_pre, + dispatch_pre=dispatch_pre + )) + + def report(self, full=False): + report = [] + platform_rows = [] + baseline_rows = [] + dispatch_rows = [] + report.append(("Platform", platform_rows)) + report.append(("", "")) + report.append(("CPU baseline", baseline_rows)) + report.append(("", "")) + report.append(("CPU dispatch", dispatch_rows)) + + ########## platform ########## + platform_rows.append(("Architecture", ( + "unsupported" if self.cc_on_noarch else self.cc_march) + )) + platform_rows.append(("Compiler", ( + "unix-like" if self.cc_is_nocc else self.cc_name) + )) + ########## baseline ########## + if self.cc_noopt: + baseline_rows.append(("Requested", "optimization disabled")) + else: + baseline_rows.append(("Requested", repr(self._requested_baseline))) + + baseline_names = self.cpu_baseline_names() + baseline_rows.append(( + "Enabled", (' '.join(baseline_names) if baseline_names else "none") + )) + baseline_flags = self.cpu_baseline_flags() + baseline_rows.append(( + "Flags", (' '.join(baseline_flags) if baseline_flags else "none") + )) + extra_checks = [] + for name in baseline_names: + extra_checks += self.feature_extra_checks(name) + baseline_rows.append(( + "Extra checks", (' '.join(extra_checks) if extra_checks else "none") + )) + + ########## dispatch ########## + if self.cc_noopt: + baseline_rows.append(("Requested", "optimization disabled")) + else: + dispatch_rows.append(("Requested", repr(self._requested_dispatch))) + + dispatch_names = self.cpu_dispatch_names() + dispatch_rows.append(( + "Enabled", (' '.join(dispatch_names) if dispatch_names else "none") + )) + ########## Generated ########## + # TODO: + # - collect object names from 'try_dispatch()' + # then get size of each object and printed + # - give more details about the features that not + # generated due compiler support + # - find a better output's design. 
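Taken together, the constructor, try_dispatch() and generate_dispatch_header() above can be driven end to end roughly as follows. A minimal sketch, assuming numpy.distutils is importable and the default distutils compiler works on the host; "simd.dispatch.c" and the build/ paths are illustrative placeholders only:

    from distutils.ccompiler import new_compiler
    from distutils.sysconfig import customize_compiler

    from numpy.distutils.ccompiler_opt import CCompilerOpt

    cc = new_compiler()
    customize_compiler(cc)

    # cpu_baseline/cpu_dispatch accept the syntax parsed above:
    # "min", "max", "native", or explicit feature/group/policy tokens.
    opt = CCompilerOpt(cc, cpu_baseline="min", cpu_dispatch="max")

    # Abstract config header consumed by the wrapped sources.
    opt.generate_dispatch_header("build/_cpu_dispatch.h")

    # Branch the dispatch-able source per target and compile every
    # branch with its proper flags, collecting the object files.
    objects = opt.try_dispatch(["simd.dispatch.c"], src_dir="build")

    # Human-readable summary of the baseline/dispatch decisions.
    print(opt.report(full=True))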
+ # + target_sources = {} + for source, (_, targets) in self.sources_status.items(): + for tar in targets: + target_sources.setdefault(tar, []).append(source) + + if not full or not target_sources: + generated = "" + for tar in self.feature_sorted(target_sources): + sources = target_sources[tar] + name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar) + generated += name + "[%d] " % len(sources) + dispatch_rows.append(("Generated", generated[:-1] if generated else "none")) + else: + dispatch_rows.append(("Generated", '')) + for tar in self.feature_sorted(target_sources): + sources = target_sources[tar] + pretty_name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar) + flags = ' '.join(self.feature_flags(tar)) + implies = ' '.join(self.feature_sorted(self.feature_implies(tar))) + detect = ' '.join(self.feature_detect(tar)) + extra_checks = [] + for name in ((tar,) if isinstance(tar, str) else tar): + extra_checks += self.feature_extra_checks(name) + extra_checks = (' '.join(extra_checks) if extra_checks else "none") + + dispatch_rows.append(('', '')) + dispatch_rows.append((pretty_name, implies)) + dispatch_rows.append(("Flags", flags)) + dispatch_rows.append(("Extra checks", extra_checks)) + dispatch_rows.append(("Detect", detect)) + for src in sources: + dispatch_rows.append(("", src)) + + ############################### + # TODO: add support for 'markdown' format + text = [] + secs_len = [len(secs) for secs, _ in report] + cols_len = [len(col) for _, rows in report for col, _ in rows] + tab = ' ' * 2 + pad = max(max(secs_len), max(cols_len)) + for sec, rows in report: + if not sec: + text.append("") # empty line + continue + sec += ' ' * (pad - len(sec)) + text.append(sec + tab + ': ') + for col, val in rows: + col += ' ' * (pad - len(col)) + text.append(tab + col + ': ' + val) + + return '\n'.join(text) + + def _wrap_target(self, output_dir, dispatch_src, target, nochange=False): + assert(isinstance(target, (str, tuple))) + if isinstance(target, str): + ext_name = target_name = target + else: + # multi-target + ext_name = '.'.join(target) + target_name = '__'.join(target) + + wrap_path = os.path.join(output_dir, os.path.basename(dispatch_src)) + wrap_path = "{0}.{2}{1}".format(*os.path.splitext(wrap_path), ext_name.lower()) + if nochange and os.path.exists(wrap_path): + return wrap_path + + self.dist_log("wrap dispatch-able target -> ", wrap_path) + # sorting for readability + features = self.feature_sorted(self.feature_implies_c(target)) + target_join = "#define %sCPU_TARGET_" % self.conf_c_prefix_ + target_defs = [target_join + f for f in features] + target_defs = '\n'.join(target_defs) + + with open(wrap_path, "w") as fd: + fd.write(textwrap.dedent("""\ + /** + * AUTOGENERATED DON'T EDIT + * Please make changes to the code generator \ + (distutils/ccompiler_opt.py) + */ + #define {pfx}CPU_TARGET_MODE + #define {pfx}CPU_TARGET_CURRENT {target_name} + {target_defs} + #include "{path}" + """).format( + pfx=self.conf_c_prefix_, target_name=target_name, + path=os.path.abspath(dispatch_src), target_defs=target_defs + )) + return wrap_path + + def _generate_config(self, output_dir, dispatch_src, targets, has_baseline=False): + config_path = os.path.basename(dispatch_src) + config_path = os.path.splitext(config_path)[0] + '.h' + config_path = os.path.join(output_dir, config_path) + # check if targets didn't change to avoid recompiling + cache_hash = self.cache_hash(targets, has_baseline) + try: + with open(config_path) as f: + last_hash = f.readline().split("cache_hash:") + if 
len(last_hash) == 2 and int(last_hash[1]) == cache_hash: + return True + except OSError: + pass + + os.makedirs(os.path.dirname(config_path), exist_ok=True) + + self.dist_log("generate dispatched config -> ", config_path) + dispatch_calls = [] + for tar in targets: + if isinstance(tar, str): + target_name = tar + else: # multi target + target_name = '__'.join([t for t in tar]) + req_detect = self.feature_detect(tar) + req_detect = '&&'.join([ + "CHK(%s)" % f for f in req_detect + ]) + dispatch_calls.append( + "\t%sCPU_DISPATCH_EXPAND_(CB((%s), %s, __VA_ARGS__))" % ( + self.conf_c_prefix_, req_detect, target_name + )) + dispatch_calls = ' \\\n'.join(dispatch_calls) + + if has_baseline: + baseline_calls = ( + "\t%sCPU_DISPATCH_EXPAND_(CB(__VA_ARGS__))" + ) % self.conf_c_prefix_ + else: + baseline_calls = '' + + with open(config_path, "w") as fd: + fd.write(textwrap.dedent("""\ + // cache_hash:{cache_hash} + /** + * AUTOGENERATED DON'T EDIT + * Please make changes to the code generator (distutils/ccompiler_opt.py) + */ + #ifndef {pfx}CPU_DISPATCH_EXPAND_ + #define {pfx}CPU_DISPATCH_EXPAND_(X) X + #endif + #undef {pfx}CPU_DISPATCH_BASELINE_CALL + #undef {pfx}CPU_DISPATCH_CALL + #define {pfx}CPU_DISPATCH_BASELINE_CALL(CB, ...) \\ + {baseline_calls} + #define {pfx}CPU_DISPATCH_CALL(CHK, CB, ...) \\ + {dispatch_calls} + """).format( + pfx=self.conf_c_prefix_, baseline_calls=baseline_calls, + dispatch_calls=dispatch_calls, cache_hash=cache_hash + )) + return False + +def new_ccompiler_opt(compiler, dispatch_hpath, **kwargs): + """ + Create a new instance of 'CCompilerOpt' and generate the dispatch header + which contains the #definitions and headers of platform-specific instruction-sets for + the enabled CPU baseline and dispatch-able features. + + Parameters + ---------- + compiler : CCompiler instance + dispatch_hpath : str + path of the dispatch header + + **kwargs: passed as-is to `CCompilerOpt(...)` + Returns + ------- + new instance of CCompilerOpt + """ + opt = CCompilerOpt(compiler, **kwargs) + if not os.path.exists(dispatch_hpath) or not opt.is_cached(): + opt.generate_dispatch_header(dispatch_hpath) + return opt diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_asimd.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_asimd.c new file mode 100644 index 0000000000000000000000000000000000000000..6bc9022a58d3cd087d167d354224ded89be91884 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_asimd.c @@ -0,0 +1,27 @@ +#ifdef _MSC_VER + #include +#endif +#include + +int main(int argc, char **argv) +{ + float *src = (float*)argv[argc-1]; + float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]); + /* MAXMIN */ + int ret = (int)vgetq_lane_f32(vmaxnmq_f32(v1, v2), 0); + ret += (int)vgetq_lane_f32(vminnmq_f32(v1, v2), 0); + /* ROUNDING */ + ret += (int)vgetq_lane_f32(vrndq_f32(v1), 0); +#ifdef __aarch64__ + { + double *src2 = (double*)argv[argc-1]; + float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]); + /* MAXMIN */ + ret += (int)vgetq_lane_f64(vmaxnmq_f64(vd1, vd2), 0); + ret += (int)vgetq_lane_f64(vminnmq_f64(vd1, vd2), 0); + /* ROUNDING */ + ret += (int)vgetq_lane_f64(vrndq_f64(vd1), 0); + } +#endif + return ret; +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_asimdfhm.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_asimdfhm.c new file mode 100644 index 0000000000000000000000000000000000000000..54e328098d17b57445024c9859cd4992492c348a --- /dev/null +++ 
b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_asimdfhm.c @@ -0,0 +1,19 @@ +#ifdef _MSC_VER + #include +#endif +#include + +int main(int argc, char **argv) +{ + float16_t *src = (float16_t*)argv[argc-1]; + float *src2 = (float*)argv[argc-2]; + float16x8_t vhp = vdupq_n_f16(src[0]); + float16x4_t vlhp = vdup_n_f16(src[1]); + float32x4_t vf = vdupq_n_f32(src2[0]); + float32x2_t vlf = vdup_n_f32(src2[1]); + + int ret = (int)vget_lane_f32(vfmlal_low_f16(vlf, vlhp, vlhp), 0); + ret += (int)vgetq_lane_f32(vfmlslq_high_f16(vf, vhp, vhp), 0); + + return ret; +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_asimdhp.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_asimdhp.c new file mode 100644 index 0000000000000000000000000000000000000000..e2de0306e0acaeda3b861756e598a132f8e1ca9f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_asimdhp.c @@ -0,0 +1,15 @@ +#ifdef _MSC_VER + #include +#endif +#include + +int main(int argc, char **argv) +{ + float16_t *src = (float16_t*)argv[argc-1]; + float16x8_t vhp = vdupq_n_f16(src[0]); + float16x4_t vlhp = vdup_n_f16(src[1]); + + int ret = (int)vgetq_lane_f16(vabdq_f16(vhp, vhp), 0); + ret += (int)vget_lane_f16(vabd_f16(vlhp, vlhp), 0); + return ret; +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_avx2.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_avx2.c new file mode 100644 index 0000000000000000000000000000000000000000..ddde868f1b586c7b066c2284556b65ec5fef834e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_avx2.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #ifndef __AVX2__ + #error "HOST/ARCH doesn't support AVX2" + #endif +#endif + +#include + +int main(int argc, char **argv) +{ + __m256i a = _mm256_abs_epi16(_mm256_loadu_si256((const __m256i*)argv[argc-1])); + return _mm_cvtsi128_si32(_mm256_castsi256_si128(a)); +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c new file mode 100644 index 0000000000000000000000000000000000000000..5799f122b511420eb16d066c31dc218bc4fae110 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c @@ -0,0 +1,24 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. 
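Each of these checks/*.c probes only has to build; a failure means the toolchain rejects the feature's flags or its intrinsics. A simplified stand-in for that test-compile step is sketched below; probe_feature() and the example paths/flags are hypothetical, and the real machinery additionally caches its results:

    import tempfile

    from distutils.ccompiler import new_compiler
    from distutils.errors import CCompilerError, DistutilsError

    def probe_feature(check_source, flags):
        """Return True if the probe compiles and links with `flags`.

        Linking matters too: each probe defines a complete main() so
        that assembler and linker gaps are caught, not just compiler
        front-end acceptance.
        """
        cc = new_compiler()
        with tempfile.TemporaryDirectory() as tmp:
            try:
                objs = cc.compile([check_source], output_dir=tmp,
                                  extra_postargs=flags)
                cc.link_executable(objs, "probe", output_dir=tmp)
            except (CCompilerError, DistutilsError):
                return False
        return True

    # e.g. probe_feature("checks/cpu_avx2.c", ["-mavx2"])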
+ */ + #if !defined(__AVX512VBMI__) || !defined(__AVX512IFMA__) + #error "HOST/ARCH doesn't support CannonLake AVX512 features" + #endif +#endif + +#include + +int main(int argc, char **argv) +{ + __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]); + /* IFMA */ + a = _mm512_madd52hi_epu64(a, a, _mm512_setzero_si512()); + /* VMBI */ + a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), a); + return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_avx512_icl.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_avx512_icl.c new file mode 100644 index 0000000000000000000000000000000000000000..3cf44d73164b6a80eca5f23f699bd00dba1f623e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_avx512_icl.c @@ -0,0 +1,26 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__AVX512VPOPCNTDQ__) || !defined(__AVX512BITALG__) || !defined(__AVX512VPOPCNTDQ__) + #error "HOST/ARCH doesn't support IceLake AVX512 features" + #endif +#endif + +#include + +int main(int argc, char **argv) +{ + __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]); + /* VBMI2 */ + a = _mm512_shrdv_epi64(a, a, _mm512_setzero_si512()); + /* BITLAG */ + a = _mm512_popcnt_epi8(a); + /* VPOPCNTDQ */ + a = _mm512_popcnt_epi64(a); + return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_f16c.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_f16c.c new file mode 100644 index 0000000000000000000000000000000000000000..fdf36cec580ce9c24fbb9d2a60fdfcaa824b3f11 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_f16c.c @@ -0,0 +1,22 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. 
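The DETECT_FEATURES guard repeated in these x86 probes exists because, as the comments say, the Intel compiler exposes every intrinsic regardless of build options: under native/host baselines GCC and Clang reject unsupported intrinsics on their own, while ICC needs the #error on the feature macros to do the rejecting. Reusing the hypothetical probe_feature() sketched above, a host-capability test could look like this (GCC/Clang flag spellings, illustrative):

    # With -march=native the probe builds only when the compiler's view
    # of the host CPU enables the whole group; -DDETECT_FEATURES arms
    # the #error guard for the Intel compiler case described above.
    native_flags = ["-march=native", "-DDETECT_FEATURES"]
    host_has_cnl = probe_feature("checks/cpu_avx512_cnl.c", native_flags)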
+ */ + #ifndef __F16C__ + #error "HOST/ARCH doesn't support F16C" + #endif +#endif + +#include +#include + +int main(int argc, char **argv) +{ + __m128 a = _mm_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-1])); + __m256 a8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-2])); + return (int)(_mm_cvtss_f32(a) + _mm_cvtss_f32(_mm256_castps256_ps128(a8))); +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_fma3.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_fma3.c new file mode 100644 index 0000000000000000000000000000000000000000..bfeef22b5f0e86becd6b9f7a8b5b0f4bdea73202 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_fma3.c @@ -0,0 +1,22 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. + */ + #if !defined(__FMA__) && !defined(__AVX2__) + #error "HOST/ARCH doesn't support FMA3" + #endif +#endif + +#include +#include + +int main(int argc, char **argv) +{ + __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]); + a = _mm256_fmadd_ps(a, a, a); + return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a)); +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_neon.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_neon.c new file mode 100644 index 0000000000000000000000000000000000000000..8c64f864dea63cb9c4ee60249e52b1ad528751c7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_neon.c @@ -0,0 +1,19 @@ +#ifdef _MSC_VER + #include +#endif +#include + +int main(int argc, char **argv) +{ + // passing from untraced pointers to avoid optimizing out any constants + // so we can test against the linker. + float *src = (float*)argv[argc-1]; + float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]); + int ret = (int)vgetq_lane_f32(vmulq_f32(v1, v2), 0); +#ifdef __aarch64__ + double *src2 = (double*)argv[argc-2]; + float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]); + ret += (int)vgetq_lane_f64(vmulq_f64(vd1, vd2), 0); +#endif + return ret; +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_sse42.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_sse42.c new file mode 100644 index 0000000000000000000000000000000000000000..f60e18f3c4f13d58bc9e8ac84752612b5ad11830 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_sse42.c @@ -0,0 +1,20 @@ +#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) + /* + * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, + * whether or not the build options for those features are specified. + * Therefore, we must test #definitions of CPU features when option native/host + * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise + * the test will be broken and leads to enable all possible features. 
+ */ + #ifndef __SSE4_2__ + #error "HOST/ARCH doesn't support SSE42" + #endif +#endif + +#include + +int main(void) +{ + __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps()); + return (int)_mm_cvtss_f32(a); +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_vsx.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_vsx.c new file mode 100644 index 0000000000000000000000000000000000000000..0b3f30d6a1f43ff32d5c6545560ef3aa41c828fb --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_vsx.c @@ -0,0 +1,21 @@ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include + +#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__)) + #define vsx_ld vec_vsx_ld + #define vsx_st vec_vsx_st +#else + #define vsx_ld vec_xl + #define vsx_st vec_xst +#endif + +int main(void) +{ + unsigned int zout[4]; + unsigned int z4[] = {0, 0, 0, 0}; + __vector unsigned int v_z4 = vsx_ld(0, z4); + vsx_st(v_z4, 0, zout); + return zout[0]; +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_vsx2.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_vsx2.c new file mode 100644 index 0000000000000000000000000000000000000000..410fb29d6db5abab4c6b2a99308f99cce07c10b2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_vsx2.c @@ -0,0 +1,13 @@ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include + +typedef __vector unsigned long long v_uint64x2; + +int main(void) +{ + v_uint64x2 z2 = (v_uint64x2){0, 0}; + z2 = (v_uint64x2)vec_cmpeq(z2, z2); + return (int)vec_extract(z2, 0); +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_vsx4.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_vsx4.c new file mode 100644 index 0000000000000000000000000000000000000000..a6acc7384dd95f7ef51d17c85492342dde353d0d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_vsx4.c @@ -0,0 +1,14 @@ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include + +typedef __vector unsigned int v_uint32x4; + +int main(void) +{ + v_uint32x4 v1 = (v_uint32x4){2, 4, 8, 16}; + v_uint32x4 v2 = (v_uint32x4){2, 2, 2, 2}; + v_uint32x4 v3 = vec_mod(v1, v2); + return (int)vec_extractm(v3); +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_vxe2.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_vxe2.c new file mode 100644 index 0000000000000000000000000000000000000000..f36d57129af67f111fa9dccca55f76dc52e6001d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/cpu_vxe2.c @@ -0,0 +1,21 @@ +#if (__VEC__ < 10303) || (__ARCH__ < 13) + #error VXE2 not supported +#endif + +#include + +int main(int argc, char **argv) +{ + int val; + __vector signed short large = { 'a', 'b', 'c', 'a', 'g', 'h', 'g', 'o' }; + __vector signed short search = { 'g', 'h', 'g', 'o' }; + __vector unsigned char len = { 0 }; + __vector unsigned char res = vec_search_string_cc(large, search, len, &val); + __vector float x = vec_xl(argc, (float*)argv); + __vector int i = vec_signed(x); + + i = vec_srdb(vec_sldb(i, i, 2), i, 3); + val += (int)vec_extract(res, 1); + val += vec_extract(i, 0); + return val; +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c new file mode 100644 index 
0000000000000000000000000000000000000000..db01aaeef40570139d5df0f2f2a9e91e26f97f74 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c @@ -0,0 +1,41 @@ +#include +/** + * The following intrinsics don't have direct native support but compilers + * tend to emulate them. + * They're usually supported by gcc >= 7.1, clang >= 4 and icc >= 19 + */ +int main(void) +{ + __m512 one_ps = _mm512_set1_ps(1.0f); + __m512d one_pd = _mm512_set1_pd(1.0); + __m512i one_i64 = _mm512_set1_epi64(1); + // add + float sum_ps = _mm512_reduce_add_ps(one_ps); + double sum_pd = _mm512_reduce_add_pd(one_pd); + int sum_int = (int)_mm512_reduce_add_epi64(one_i64); + sum_int += (int)_mm512_reduce_add_epi32(one_i64); + // mul + sum_ps += _mm512_reduce_mul_ps(one_ps); + sum_pd += _mm512_reduce_mul_pd(one_pd); + sum_int += (int)_mm512_reduce_mul_epi64(one_i64); + sum_int += (int)_mm512_reduce_mul_epi32(one_i64); + // min + sum_ps += _mm512_reduce_min_ps(one_ps); + sum_pd += _mm512_reduce_min_pd(one_pd); + sum_int += (int)_mm512_reduce_min_epi32(one_i64); + sum_int += (int)_mm512_reduce_min_epu32(one_i64); + sum_int += (int)_mm512_reduce_min_epi64(one_i64); + // max + sum_ps += _mm512_reduce_max_ps(one_ps); + sum_pd += _mm512_reduce_max_pd(one_pd); + sum_int += (int)_mm512_reduce_max_epi32(one_i64); + sum_int += (int)_mm512_reduce_max_epu32(one_i64); + sum_int += (int)_mm512_reduce_max_epi64(one_i64); + // and + sum_int += (int)_mm512_reduce_and_epi32(one_i64); + sum_int += (int)_mm512_reduce_and_epi64(one_i64); + // or + sum_int += (int)_mm512_reduce_or_epi32(one_i64); + sum_int += (int)_mm512_reduce_or_epi64(one_i64); + return (int)sum_ps + (int)sum_pd + sum_int; +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/extra_vsx3_half_double.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/extra_vsx3_half_double.c new file mode 100644 index 0000000000000000000000000000000000000000..514a2b18f96cb089bb3c96f6420356c892adefdf --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/extra_vsx3_half_double.c @@ -0,0 +1,12 @@ +/** + * Assembler may not fully support the following VSX3 scalar + * instructions, even though compilers report VSX3 support. 
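The extra_*.c probes cover a different failure mode: the compiler accepts a feature's flags but part of the feature does not actually work (AVX512 reductions are only emulated by newer compilers, and some assemblers reject VSX3 scalar instructions). They go through the same test-compile step; below is a sketch of that wiring, again reusing the hypothetical probe_feature() from earlier, with an illustrative mapping rather than the real configuration table:

    # Hypothetical table tying a feature to its optional extra checks;
    # the ones that build are what report() lists under its
    # "Extra checks" rows earlier in this file.
    EXTRA_CHECKS = {
        "AVX512F": ["checks/extra_avx512f_reduce.c"],
        "VSX3": ["checks/extra_vsx3_half_double.c"],
    }

    def passing_extra_checks(name, flags):
        return [src for src in EXTRA_CHECKS.get(name, [])
                if probe_feature(src, flags)]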
+ */ +int main(void) +{ + unsigned short bits = 0xFF; + double f; + __asm__ __volatile__("xscvhpdp %x0,%x1" : "=wa"(f) : "wa"(bits)); + __asm__ __volatile__ ("xscvdphp %x0,%x1" : "=wa" (bits) : "wa" (f)); + return bits; +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/extra_vsx4_mma.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/extra_vsx4_mma.c new file mode 100644 index 0000000000000000000000000000000000000000..a70b2a9f6f95408eb7cfe59c056f114cc363869b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/extra_vsx4_mma.c @@ -0,0 +1,21 @@ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include + +typedef __vector float fv4sf_t; +typedef __vector unsigned char vec_t; + +int main(void) +{ + __vector_quad acc0; + float a[4] = {0,1,2,3}; + float b[4] = {0,1,2,3}; + vec_t *va = (vec_t *) a; + vec_t *vb = (vec_t *) b; + __builtin_mma_xvf32ger(&acc0, va[0], vb[0]); + fv4sf_t result[4]; + __builtin_mma_disassemble_acc((void *)result, &acc0); + fv4sf_t c0 = result[0]; + return (int)((float*)&c0)[0]; +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/extra_vsx_asm.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/extra_vsx_asm.c new file mode 100644 index 0000000000000000000000000000000000000000..b73a6f43808eeb5af2bd212ee88b6c1002a29901 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/extra_vsx_asm.c @@ -0,0 +1,36 @@ +/** + * Testing ASM VSX register number fixer '%x' + * + * old versions of CLANG doesn't support %x in the inline asm template + * which fixes register number when using any of the register constraints wa, wd, wf. + * + * xref: + * - https://bugs.llvm.org/show_bug.cgi?id=31837 + * - https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html + */ +#ifndef __VSX__ + #error "VSX is not supported" +#endif +#include + +#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__)) + #define vsx_ld vec_vsx_ld + #define vsx_st vec_vsx_st +#else + #define vsx_ld vec_xl + #define vsx_st vec_xst +#endif + +int main(void) +{ + float z4[] = {0, 0, 0, 0}; + signed int zout[] = {0, 0, 0, 0}; + + __vector float vz4 = vsx_ld(0, z4); + __vector signed int asm_ret = vsx_ld(0, zout); + + __asm__ ("xvcvspsxws %x0,%x1" : "=wa" (vz4) : "wa" (asm_ret)); + + vsx_st(asm_ret, 0, zout); + return zout[0]; +} diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/checks/test_flags.c b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/test_flags.c new file mode 100644 index 0000000000000000000000000000000000000000..4cd09d42a6503780087632aae9ea5b458671fa57 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/checks/test_flags.c @@ -0,0 +1 @@ +int test_flags; diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/conv_template.py b/.venv/lib/python3.11/site-packages/numpy/distutils/conv_template.py new file mode 100644 index 0000000000000000000000000000000000000000..c8933d1d42865f745bb985f7f9068a96985997f7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/conv_template.py @@ -0,0 +1,329 @@ +#!/usr/bin/env python3 +""" +takes templated file .xxx.src and produces .xxx file where .xxx is +.i or .c or .h, using the following template rules + +/**begin repeat -- on a line by itself marks the start of a repeated code + segment +/**end repeat**/ -- on a line by itself marks it's end + +After the /**begin repeat and before the */, all the named templates are placed +these should all have the same 
number of replacements + +Repeat blocks can be nested, with each nested block labeled with its depth, +i.e. +/**begin repeat1 + *.... + */ +/**end repeat1**/ + +When using nested loops, you can optionally exclude particular +combinations of the variables using (inside the comment portion of the inner loop): + + :exclude: var1=value1, var2=value2, ... + +This will exclude the pattern where var1 is value1 and var2 is value2 when +the result is being generated. + + +In the main body each replace will use one entry from the list of named replacements + + Note that all #..# forms in a block must have the same number of + comma-separated entries. + +Example: + + An input file containing + + /**begin repeat + * #a = 1,2,3# + * #b = 1,2,3# + */ + + /**begin repeat1 + * #c = ted, jim# + */ + @a@, @b@, @c@ + /**end repeat1**/ + + /**end repeat**/ + + produces + + line 1 "template.c.src" + + /* + ********************************************************************* + ** This file was autogenerated from a template DO NOT EDIT!!** + ** Changes should be made to the original source (.src) file ** + ********************************************************************* + */ + + #line 9 + 1, 1, ted + + #line 9 + 1, 1, jim + + #line 9 + 2, 2, ted + + #line 9 + 2, 2, jim + + #line 9 + 3, 3, ted + + #line 9 + 3, 3, jim + +""" + +__all__ = ['process_str', 'process_file'] + +import os +import sys +import re + +# names for replacement that are already global. +global_names = {} + +# header placed at the front of head processed file +header =\ +""" +/* + ***************************************************************************** + ** This file was autogenerated from a template DO NOT EDIT!!!! ** + ** Changes should be made to the original source (.src) file ** + ***************************************************************************** + */ + +""" +# Parse string for repeat loops +def parse_structure(astr, level): + """ + The returned line number is from the beginning of the string, starting + at zero. Returns an empty list if no loops found. + + """ + if level == 0 : + loopbeg = "/**begin repeat" + loopend = "/**end repeat**/" + else : + loopbeg = "/**begin repeat%d" % level + loopend = "/**end repeat%d**/" % level + + ind = 0 + line = 0 + spanlist = [] + while True: + start = astr.find(loopbeg, ind) + if start == -1: + break + start2 = astr.find("*/", start) + start2 = astr.find("\n", start2) + fini1 = astr.find(loopend, start2) + fini2 = astr.find("\n", fini1) + line += astr.count("\n", ind, start2+1) + spanlist.append((start, start2+1, fini1, fini2+1, line)) + line += astr.count("\n", start2+1, fini2) + ind = fini2 + spanlist.sort() + return spanlist + + +def paren_repl(obj): + torep = obj.group(1) + numrep = obj.group(2) + return ','.join([torep]*int(numrep)) + +parenrep = re.compile(r"\(([^)]*)\)\*(\d+)") +plainrep = re.compile(r"([^*]+)\*(\d+)") +def parse_values(astr): + # replaces all occurrences of '(a,b,c)*4' in astr + # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty braces generate + # empty values, i.e., ()*4 yields ',,,'. The result is + # split at ',' and a list of values returned. 
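The repeat-block grammar described above is easiest to see end to end. A small sketch using process_str() on a made-up two-iteration template; the names and types are placeholders:

    from numpy.distutils.conv_template import process_str

    # @name@/@type@ are substituted column-by-column from the
    # comma-separated value lists, giving one copy per column.
    template = """\
    /**begin repeat
     * #name = add, sub#
     * #type = int, long#
     */
    @type@ npy_@name@(@type@ a, @type@ b);
    /**end repeat**/
    """
    print(process_str(template))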
+ astr = parenrep.sub(paren_repl, astr) + # replaces occurrences of xxx*3 with xxx, xxx, xxx + astr = ','.join([plainrep.sub(paren_repl, x.strip()) + for x in astr.split(',')]) + return astr.split(',') + + +stripast = re.compile(r"\n\s*\*?") +named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#") +exclude_vars_re = re.compile(r"(\w*)=(\w*)") +exclude_re = re.compile(":exclude:") +def parse_loop_header(loophead) : + """Find all named replacements in the header + + Returns a list of dictionaries, one for each loop iteration, + where each key is a name to be substituted and the corresponding + value is the replacement string. + + Also return a list of exclusions. The exclusions are dictionaries + of key value pairs. There can be more than one exclusion. + [{'var1':'value1', 'var2', 'value2'[,...]}, ...] + + """ + # Strip out '\n' and leading '*', if any, in continuation lines. + # This should not effect code previous to this change as + # continuation lines were not allowed. + loophead = stripast.sub("", loophead) + # parse out the names and lists of values + names = [] + reps = named_re.findall(loophead) + nsub = None + for rep in reps: + name = rep[0] + vals = parse_values(rep[1]) + size = len(vals) + if nsub is None : + nsub = size + elif nsub != size : + msg = "Mismatch in number of values, %d != %d\n%s = %s" + raise ValueError(msg % (nsub, size, name, vals)) + names.append((name, vals)) + + + # Find any exclude variables + excludes = [] + + for obj in exclude_re.finditer(loophead): + span = obj.span() + # find next newline + endline = loophead.find('\n', span[1]) + substr = loophead[span[1]:endline] + ex_names = exclude_vars_re.findall(substr) + excludes.append(dict(ex_names)) + + # generate list of dictionaries, one for each template iteration + dlist = [] + if nsub is None : + raise ValueError("No substitution variables found") + for i in range(nsub): + tmp = {name: vals[i] for name, vals in names} + dlist.append(tmp) + return dlist + +replace_re = re.compile(r"@(\w+)@") +def parse_string(astr, env, level, line) : + lineno = "#line %d\n" % line + + # local function for string replacement, uses env + def replace(match): + name = match.group(1) + try : + val = env[name] + except KeyError: + msg = 'line %d: no definition of key "%s"'%(line, name) + raise ValueError(msg) from None + return val + + code = [lineno] + struct = parse_structure(astr, level) + if struct : + # recurse over inner loops + oldend = 0 + newlevel = level + 1 + for sub in struct: + pref = astr[oldend:sub[0]] + head = astr[sub[0]:sub[1]] + text = astr[sub[1]:sub[2]] + oldend = sub[3] + newline = line + sub[4] + code.append(replace_re.sub(replace, pref)) + try : + envlist = parse_loop_header(head) + except ValueError as e: + msg = "line %d: %s" % (newline, e) + raise ValueError(msg) + for newenv in envlist : + newenv.update(env) + newcode = parse_string(text, newenv, newlevel, newline) + code.extend(newcode) + suff = astr[oldend:] + code.append(replace_re.sub(replace, suff)) + else : + # replace keys + code.append(replace_re.sub(replace, astr)) + code.append('\n') + return ''.join(code) + +def process_str(astr): + code = [header] + code.extend(parse_string(astr, global_names, 0, 1)) + return ''.join(code) + + +include_src_re = re.compile(r"(\n|\A)#include\s*['\"]" + r"(?P[\w\d./\\]+[.]src)['\"]", re.I) + +def resolve_includes(source): + d = os.path.dirname(source) + with open(source) as fid: + lines = [] + for line in fid: + m = include_src_re.match(line) + if m: + fn = m.group('name') + if not os.path.isabs(fn): + fn = 
os.path.join(d, fn) + if os.path.isfile(fn): + lines.extend(resolve_includes(fn)) + else: + lines.append(line) + else: + lines.append(line) + return lines + +def process_file(source): + lines = resolve_includes(source) + sourcefile = os.path.normcase(source).replace("\\", "\\\\") + try: + code = process_str(''.join(lines)) + except ValueError as e: + raise ValueError('In "%s" loop at %s' % (sourcefile, e)) from None + return '#line 1 "%s"\n%s' % (sourcefile, code) + + +def unique_key(adict): + # this obtains a unique key given a dictionary + # currently it works by appending together n of the letters of the + # current keys and increasing n until a unique key is found + # -- not particularly quick + allkeys = list(adict.keys()) + done = False + n = 1 + while not done: + newkey = "".join([x[:n] for x in allkeys]) + if newkey in allkeys: + n += 1 + else: + done = True + return newkey + + +def main(): + try: + file = sys.argv[1] + except IndexError: + fid = sys.stdin + outfile = sys.stdout + else: + fid = open(file, 'r') + (base, ext) = os.path.splitext(file) + newname = base + outfile = open(newname, 'w') + + allstr = fid.read() + try: + writestr = process_str(allstr) + except ValueError as e: + raise ValueError("In %s loop at %s" % (file, e)) from None + + outfile.write(writestr) + +if __name__ == "__main__": + main() diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/core.py b/.venv/lib/python3.11/site-packages/numpy/distutils/core.py new file mode 100644 index 0000000000000000000000000000000000000000..1cdc739731bfb073a580202f0cd0a36c5d3cb5aa --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/core.py @@ -0,0 +1,216 @@ +import sys +from distutils.core import Distribution + +if 'setuptools' in sys.modules: + have_setuptools = True + from setuptools import setup as old_setup + # easy_install imports math, it may be picked up from cwd + from setuptools.command import easy_install + try: + # very old versions of setuptools don't have this + from setuptools.command import bdist_egg + except ImportError: + have_setuptools = False +else: + from distutils.core import setup as old_setup + have_setuptools = False + +import warnings +import distutils.core +import distutils.dist + +from numpy.distutils.extension import Extension # noqa: F401 +from numpy.distutils.numpy_distribution import NumpyDistribution +from numpy.distutils.command import config, config_compiler, \ + build, build_py, build_ext, build_clib, build_src, build_scripts, \ + sdist, install_data, install_headers, install, bdist_rpm, \ + install_clib +from numpy.distutils.misc_util import is_sequence, is_string + +numpy_cmdclass = {'build': build.build, + 'build_src': build_src.build_src, + 'build_scripts': build_scripts.build_scripts, + 'config_cc': config_compiler.config_cc, + 'config_fc': config_compiler.config_fc, + 'config': config.config, + 'build_ext': build_ext.build_ext, + 'build_py': build_py.build_py, + 'build_clib': build_clib.build_clib, + 'sdist': sdist.sdist, + 'install_data': install_data.install_data, + 'install_headers': install_headers.install_headers, + 'install_clib': install_clib.install_clib, + 'install': install.install, + 'bdist_rpm': bdist_rpm.bdist_rpm, + } +if have_setuptools: + # Use our own versions of develop and egg_info to ensure that build_src is + # handled appropriately. 
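numpy_cmdclass swaps numpy's command implementations in for the distutils/setuptools defaults, and setup() below layers any user-supplied cmdclass on top of a copy of it. From a setup.py this amounts to the following; the subclass here is a hypothetical example:

    from numpy.distutils.core import setup
    from numpy.distutils.command.build_ext import build_ext

    class MyBuildExt(build_ext):
        """Hypothetical tweak layered over numpy's build_ext."""
        def run(self):
            print("running numpy's build_ext via a subclass")
            build_ext.run(self)

    # Merged into numpy_cmdclass.copy() by setup(), so every other
    # command keeps numpy's implementation.
    setup(name="example", cmdclass={"build_ext": MyBuildExt})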
+ from numpy.distutils.command import develop, egg_info + numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg + numpy_cmdclass['develop'] = develop.develop + numpy_cmdclass['easy_install'] = easy_install.easy_install + numpy_cmdclass['egg_info'] = egg_info.egg_info + +def _dict_append(d, **kws): + for k, v in kws.items(): + if k not in d: + d[k] = v + continue + dv = d[k] + if isinstance(dv, tuple): + d[k] = dv + tuple(v) + elif isinstance(dv, list): + d[k] = dv + list(v) + elif isinstance(dv, dict): + _dict_append(dv, **v) + elif is_string(dv): + assert is_string(v) + d[k] = v + else: + raise TypeError(repr(type(dv))) + +def _command_line_ok(_cache=None): + """ Return True if command line does not contain any + help or display requests. + """ + if _cache: + return _cache[0] + elif _cache is None: + _cache = [] + ok = True + display_opts = ['--'+n for n in Distribution.display_option_names] + for o in Distribution.display_options: + if o[1]: + display_opts.append('-'+o[1]) + for arg in sys.argv: + if arg.startswith('--help') or arg=='-h' or arg in display_opts: + ok = False + break + _cache.append(ok) + return ok + +def get_distribution(always=False): + dist = distutils.core._setup_distribution + # XXX Hack to get numpy installable with easy_install. + # The problem is easy_install runs it's own setup(), which + # sets up distutils.core._setup_distribution. However, + # when our setup() runs, that gets overwritten and lost. + # We can't use isinstance, as the DistributionWithoutHelpCommands + # class is local to a function in setuptools.command.easy_install + if dist is not None and \ + 'DistributionWithoutHelpCommands' in repr(dist): + dist = None + if always and dist is None: + dist = NumpyDistribution() + return dist + +def setup(**attr): + + cmdclass = numpy_cmdclass.copy() + + new_attr = attr.copy() + if 'cmdclass' in new_attr: + cmdclass.update(new_attr['cmdclass']) + new_attr['cmdclass'] = cmdclass + + if 'configuration' in new_attr: + # To avoid calling configuration if there are any errors + # or help request in command in the line. 
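The 'configuration' entry popped here follows the classic numpy.distutils pattern: a callable (invoked with no arguments, so it needs defaults) that returns a Configuration object, which setup() converts via todict() and merges with _dict_append(). A minimal sketch of such a callable, using the standard Configuration helper from misc_util; the package and source names are placeholders:

    from numpy.distutils.misc_util import Configuration

    def configuration(parent_package="", top_path=None):
        # Configuration exposes todict(), which is what setup() probes
        # for with hasattr(config, 'todict') below.
        config = Configuration("example", parent_package, top_path)
        config.add_extension("_impl", sources=["_impl.c"])
        return config

    # if __name__ == "__main__":
    #     from numpy.distutils.core import setup
    #     setup(configuration=configuration)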
+ configuration = new_attr.pop('configuration') + + old_dist = distutils.core._setup_distribution + old_stop = distutils.core._setup_stop_after + distutils.core._setup_distribution = None + distutils.core._setup_stop_after = "commandline" + try: + dist = setup(**new_attr) + finally: + distutils.core._setup_distribution = old_dist + distutils.core._setup_stop_after = old_stop + if dist.help or not _command_line_ok(): + # probably displayed help, skip running any commands + return dist + + # create setup dictionary and append to new_attr + config = configuration() + if hasattr(config, 'todict'): + config = config.todict() + _dict_append(new_attr, **config) + + # Move extension source libraries to libraries + libraries = [] + for ext in new_attr.get('ext_modules', []): + new_libraries = [] + for item in ext.libraries: + if is_sequence(item): + lib_name, build_info = item + _check_append_ext_library(libraries, lib_name, build_info) + new_libraries.append(lib_name) + elif is_string(item): + new_libraries.append(item) + else: + raise TypeError("invalid description of extension module " + "library %r" % (item,)) + ext.libraries = new_libraries + if libraries: + if 'libraries' not in new_attr: + new_attr['libraries'] = [] + for item in libraries: + _check_append_library(new_attr['libraries'], item) + + # sources in ext_modules or libraries may contain header files + if ('ext_modules' in new_attr or 'libraries' in new_attr) \ + and 'headers' not in new_attr: + new_attr['headers'] = [] + + # Use our custom NumpyDistribution class instead of distutils' one + new_attr['distclass'] = NumpyDistribution + + return old_setup(**new_attr) + +def _check_append_library(libraries, item): + for libitem in libraries: + if is_sequence(libitem): + if is_sequence(item): + if item[0]==libitem[0]: + if item[1] is libitem[1]: + return + warnings.warn("[0] libraries list contains %r with" + " different build_info" % (item[0],), + stacklevel=2) + break + else: + if item==libitem[0]: + warnings.warn("[1] libraries list contains %r with" + " no build_info" % (item[0],), + stacklevel=2) + break + else: + if is_sequence(item): + if item[0]==libitem: + warnings.warn("[2] libraries list contains %r with" + " no build_info" % (item[0],), + stacklevel=2) + break + else: + if item==libitem: + return + libraries.append(item) + +def _check_append_ext_library(libraries, lib_name, build_info): + for item in libraries: + if is_sequence(item): + if item[0]==lib_name: + if item[1] is build_info: + return + warnings.warn("[3] libraries list contains %r with" + " different build_info" % (lib_name,), + stacklevel=2) + break + elif item==lib_name: + warnings.warn("[4] libraries list contains %r with" + " no build_info" % (lib_name,), + stacklevel=2) + break + libraries.append((lib_name, build_info)) diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/cpuinfo.py b/.venv/lib/python3.11/site-packages/numpy/distutils/cpuinfo.py new file mode 100644 index 0000000000000000000000000000000000000000..77620210981dd1e97d87a078344b3735c3cc6e1d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/cpuinfo.py @@ -0,0 +1,683 @@ +#!/usr/bin/env python3 +""" +cpuinfo + +Copyright 2002 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy (BSD style) license. See LICENSE.txt that came with +this distribution for specifics. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
+Pearu Peterson + +""" +__all__ = ['cpu'] + +import os +import platform +import re +import sys +import types +import warnings + +from subprocess import getstatusoutput + + +def getoutput(cmd, successful_status=(0,), stacklevel=1): + try: + status, output = getstatusoutput(cmd) + except OSError as e: + warnings.warn(str(e), UserWarning, stacklevel=stacklevel) + return False, "" + if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: + return True, output + return False, output + +def command_info(successful_status=(0,), stacklevel=1, **kw): + info = {} + for key in kw: + ok, output = getoutput(kw[key], successful_status=successful_status, + stacklevel=stacklevel+1) + if ok: + info[key] = output.strip() + return info + +def command_by_line(cmd, successful_status=(0,), stacklevel=1): + ok, output = getoutput(cmd, successful_status=successful_status, + stacklevel=stacklevel+1) + if not ok: + return + for line in output.splitlines(): + yield line.strip() + +def key_value_from_command(cmd, sep, successful_status=(0,), + stacklevel=1): + d = {} + for line in command_by_line(cmd, successful_status=successful_status, + stacklevel=stacklevel+1): + l = [s.strip() for s in line.split(sep, 1)] + if len(l) == 2: + d[l[0]] = l[1] + return d + +class CPUInfoBase: + """Holds CPU information and provides methods for requiring + the availability of various CPU features. + """ + + def _try_call(self, func): + try: + return func() + except Exception: + pass + + def __getattr__(self, name): + if not name.startswith('_'): + if hasattr(self, '_'+name): + attr = getattr(self, '_'+name) + if isinstance(attr, types.MethodType): + return lambda func=self._try_call,attr=attr : func(attr) + else: + return lambda : None + raise AttributeError(name) + + def _getNCPUs(self): + return 1 + + def __get_nbits(self): + abits = platform.architecture()[0] + nbits = re.compile(r'(\d+)bit').search(abits).group(1) + return nbits + + def _is_32bit(self): + return self.__get_nbits() == '32' + + def _is_64bit(self): + return self.__get_nbits() == '64' + +class LinuxCPUInfo(CPUInfoBase): + + info = None + + def __init__(self): + if self.info is not None: + return + info = [ {} ] + ok, output = getoutput('uname -m') + if ok: + info[0]['uname_m'] = output.strip() + try: + fo = open('/proc/cpuinfo') + except OSError as e: + warnings.warn(str(e), UserWarning, stacklevel=2) + else: + for line in fo: + name_value = [s.strip() for s in line.split(':', 1)] + if len(name_value) != 2: + continue + name, value = name_value + if not info or name in info[-1]: # next processor + info.append({}) + info[-1][name] = value + fo.close() + self.__class__.info = info + + def _not_impl(self): pass + + # Athlon + + def _is_AMD(self): + return self.info[0]['vendor_id']=='AuthenticAMD' + + def _is_AthlonK6_2(self): + return self._is_AMD() and self.info[0]['model'] == '2' + + def _is_AthlonK6_3(self): + return self._is_AMD() and self.info[0]['model'] == '3' + + def _is_AthlonK6(self): + return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None + + def _is_AthlonK7(self): + return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None + + def _is_AthlonMP(self): + return re.match(r'.*?Athlon\(tm\) MP\b', + self.info[0]['model name']) is not None + + def _is_AMD64(self): + return self.is_AMD() and self.info[0]['family'] == '15' + + def _is_Athlon64(self): + return re.match(r'.*?Athlon\(tm\) 64\b', + self.info[0]['model name']) is not None + + def _is_AthlonHX(self): + return re.match(r'.*?Athlon HX\b', + self.info[0]['model 
name']) is not None + + def _is_Opteron(self): + return re.match(r'.*?Opteron\b', + self.info[0]['model name']) is not None + + def _is_Hammer(self): + return re.match(r'.*?Hammer\b', + self.info[0]['model name']) is not None + + # Alpha + + def _is_Alpha(self): + return self.info[0]['cpu']=='Alpha' + + def _is_EV4(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' + + def _is_EV5(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' + + def _is_EV56(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' + + def _is_PCA56(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' + + # Intel + + #XXX + _is_i386 = _not_impl + + def _is_Intel(self): + return self.info[0]['vendor_id']=='GenuineIntel' + + def _is_i486(self): + return self.info[0]['cpu']=='i486' + + def _is_i586(self): + return self.is_Intel() and self.info[0]['cpu family'] == '5' + + def _is_i686(self): + return self.is_Intel() and self.info[0]['cpu family'] == '6' + + def _is_Celeron(self): + return re.match(r'.*?Celeron', + self.info[0]['model name']) is not None + + def _is_Pentium(self): + return re.match(r'.*?Pentium', + self.info[0]['model name']) is not None + + def _is_PentiumII(self): + return re.match(r'.*?Pentium.*?II\b', + self.info[0]['model name']) is not None + + def _is_PentiumPro(self): + return re.match(r'.*?PentiumPro\b', + self.info[0]['model name']) is not None + + def _is_PentiumMMX(self): + return re.match(r'.*?Pentium.*?MMX\b', + self.info[0]['model name']) is not None + + def _is_PentiumIII(self): + return re.match(r'.*?Pentium.*?III\b', + self.info[0]['model name']) is not None + + def _is_PentiumIV(self): + return re.match(r'.*?Pentium.*?(IV|4)\b', + self.info[0]['model name']) is not None + + def _is_PentiumM(self): + return re.match(r'.*?Pentium.*?M\b', + self.info[0]['model name']) is not None + + def _is_Prescott(self): + return self.is_PentiumIV() and self.has_sse3() + + def _is_Nocona(self): + return (self.is_Intel() + and (self.info[0]['cpu family'] == '6' + or self.info[0]['cpu family'] == '15') + and (self.has_sse3() and not self.has_ssse3()) + and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None) + + def _is_Core2(self): + return (self.is_64bit() and self.is_Intel() and + re.match(r'.*?Core\(TM\)2\b', + self.info[0]['model name']) is not None) + + def _is_Itanium(self): + return re.match(r'.*?Itanium\b', + self.info[0]['family']) is not None + + def _is_XEON(self): + return re.match(r'.*?XEON\b', + self.info[0]['model name'], re.IGNORECASE) is not None + + _is_Xeon = _is_XEON + + # Varia + + def _is_singleCPU(self): + return len(self.info) == 1 + + def _getNCPUs(self): + return len(self.info) + + def _has_fdiv_bug(self): + return self.info[0]['fdiv_bug']=='yes' + + def _has_f00f_bug(self): + return self.info[0]['f00f_bug']=='yes' + + def _has_mmx(self): + return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None + + def _has_sse(self): + return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None + + def _has_sse2(self): + return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None + + def _has_sse3(self): + return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None + + def _has_ssse3(self): + return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None + + def _has_3dnow(self): + return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None + + def _has_3dnowext(self): + return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None + +class 
IRIXCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = key_value_from_command('sysconf', sep=' ', + successful_status=(0, 1)) + self.__class__.info = info + + def _not_impl(self): pass + + def _is_singleCPU(self): + return self.info.get('NUM_PROCESSORS') == '1' + + def _getNCPUs(self): + return int(self.info.get('NUM_PROCESSORS', 1)) + + def __cputype(self, n): + return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) + def _is_r2000(self): return self.__cputype(2000) + def _is_r3000(self): return self.__cputype(3000) + def _is_r3900(self): return self.__cputype(3900) + def _is_r4000(self): return self.__cputype(4000) + def _is_r4100(self): return self.__cputype(4100) + def _is_r4300(self): return self.__cputype(4300) + def _is_r4400(self): return self.__cputype(4400) + def _is_r4600(self): return self.__cputype(4600) + def _is_r4650(self): return self.__cputype(4650) + def _is_r5000(self): return self.__cputype(5000) + def _is_r6000(self): return self.__cputype(6000) + def _is_r8000(self): return self.__cputype(8000) + def _is_r10000(self): return self.__cputype(10000) + def _is_r12000(self): return self.__cputype(12000) + def _is_rorion(self): return self.__cputype('orion') + + def get_ip(self): + try: return self.info.get('MACHINE') + except Exception: pass + def __machine(self, n): + return self.info.get('MACHINE').lower() == 'ip%s' % (n) + def _is_IP19(self): return self.__machine(19) + def _is_IP20(self): return self.__machine(20) + def _is_IP21(self): return self.__machine(21) + def _is_IP22(self): return self.__machine(22) + def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() + def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() + def _is_IP24(self): return self.__machine(24) + def _is_IP25(self): return self.__machine(25) + def _is_IP26(self): return self.__machine(26) + def _is_IP27(self): return self.__machine(27) + def _is_IP28(self): return self.__machine(28) + def _is_IP30(self): return self.__machine(30) + def _is_IP32(self): return self.__machine(32) + def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() + def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() + + +class DarwinCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = command_info(arch='arch', + machine='machine') + info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') + self.__class__.info = info + + def _not_impl(self): pass + + def _getNCPUs(self): + return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) + + def _is_Power_Macintosh(self): + return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' + + def _is_i386(self): + return self.info['arch']=='i386' + def _is_ppc(self): + return self.info['arch']=='ppc' + + def __machine(self, n): + return self.info['machine'] == 'ppc%s'%n + def _is_ppc601(self): return self.__machine(601) + def _is_ppc602(self): return self.__machine(602) + def _is_ppc603(self): return self.__machine(603) + def _is_ppc603e(self): return self.__machine('603e') + def _is_ppc604(self): return self.__machine(604) + def _is_ppc604e(self): return self.__machine('604e') + def _is_ppc620(self): return self.__machine(620) + def _is_ppc630(self): return self.__machine(630) + def _is_ppc740(self): return self.__machine(740) + def _is_ppc7400(self): return self.__machine(7400) + def _is_ppc7450(self): return self.__machine(7450) + def _is_ppc750(self): return self.__machine(750) + def 
_is_ppc403(self): return self.__machine(403) + def _is_ppc505(self): return self.__machine(505) + def _is_ppc801(self): return self.__machine(801) + def _is_ppc821(self): return self.__machine(821) + def _is_ppc823(self): return self.__machine(823) + def _is_ppc860(self): return self.__machine(860) + + +class SunOSCPUInfo(CPUInfoBase): + + info = None + + def __init__(self): + if self.info is not None: + return + info = command_info(arch='arch', + mach='mach', + uname_i='uname_i', + isainfo_b='isainfo -b', + isainfo_n='isainfo -n', + ) + info['uname_X'] = key_value_from_command('uname -X', sep='=') + for line in command_by_line('psrinfo -v 0'): + m = re.match(r'\s*The (?P
<p>
[\w\d]+) processor operates at', line) + if m: + info['processor'] = m.group('p') + break + self.__class__.info = info + + def _not_impl(self): pass + + def _is_i386(self): + return self.info['isainfo_n']=='i386' + def _is_sparc(self): + return self.info['isainfo_n']=='sparc' + def _is_sparcv9(self): + return self.info['isainfo_n']=='sparcv9' + + def _getNCPUs(self): + return int(self.info['uname_X'].get('NumCPU', 1)) + + def _is_sun4(self): + return self.info['arch']=='sun4' + + def _is_SUNW(self): + return re.match(r'SUNW', self.info['uname_i']) is not None + def _is_sparcstation5(self): + return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None + def _is_ultra1(self): + return re.match(r'.*Ultra-1', self.info['uname_i']) is not None + def _is_ultra250(self): + return re.match(r'.*Ultra-250', self.info['uname_i']) is not None + def _is_ultra2(self): + return re.match(r'.*Ultra-2', self.info['uname_i']) is not None + def _is_ultra30(self): + return re.match(r'.*Ultra-30', self.info['uname_i']) is not None + def _is_ultra4(self): + return re.match(r'.*Ultra-4', self.info['uname_i']) is not None + def _is_ultra5_10(self): + return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None + def _is_ultra5(self): + return re.match(r'.*Ultra-5', self.info['uname_i']) is not None + def _is_ultra60(self): + return re.match(r'.*Ultra-60', self.info['uname_i']) is not None + def _is_ultra80(self): + return re.match(r'.*Ultra-80', self.info['uname_i']) is not None + def _is_ultraenterprice(self): + return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None + def _is_ultraenterprice10k(self): + return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None + def _is_sunfire(self): + return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None + def _is_ultra(self): + return re.match(r'.*Ultra', self.info['uname_i']) is not None + + def _is_cpusparcv7(self): + return self.info['processor']=='sparcv7' + def _is_cpusparcv8(self): + return self.info['processor']=='sparcv8' + def _is_cpusparcv9(self): + return self.info['processor']=='sparcv9' + +class Win32CPUInfo(CPUInfoBase): + + info = None + pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" + # XXX: what does the value of + # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 + # mean? + + def __init__(self): + if self.info is not None: + return + info = [] + try: + #XXX: Bad style to use so long `try:...except:...`. Fix it! 
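+            # Each numbered subkey of pkey (0, 1, ...) describes one logical
+            # processor; its "Identifier" value looks like
+            # "x86 Family 6 Model 15 Stepping 11", which prgx below unpacks.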
+            import winreg
+
+            prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
+                              r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
+            chnd=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey)
+            pnum=0
+            while True:
+                try:
+                    proc=winreg.EnumKey(chnd, pnum)
+                except winreg.error:
+                    break
+                else:
+                    pnum+=1
+                    info.append({"Processor":proc})
+                    phnd=winreg.OpenKey(chnd, proc)
+                    pidx=0
+                    while True:
+                        try:
+                            name, value, vtpe=winreg.EnumValue(phnd, pidx)
+                        except winreg.error:
+                            break
+                        else:
+                            pidx=pidx+1
+                            info[-1][name]=value
+                            if name=="Identifier":
+                                srch=prgx.search(value)
+                                if srch:
+                                    info[-1]["Family"]=int(srch.group("FML"))
+                                    info[-1]["Model"]=int(srch.group("MDL"))
+                                    info[-1]["Stepping"]=int(srch.group("STP"))
+        except Exception as e:
+            print(e, '(ignoring)')
+        self.__class__.info = info
+
+    def _not_impl(self): pass
+
+    # Athlon
+
+    def _is_AMD(self):
+        return self.info[0]['VendorIdentifier']=='AuthenticAMD'
+
+    def _is_Am486(self):
+        return self.is_AMD() and self.info[0]['Family']==4
+
+    def _is_Am5x86(self):
+        return self.is_AMD() and self.info[0]['Family']==4
+
+    def _is_AMDK5(self):
+        return self.is_AMD() and self.info[0]['Family']==5 \
+               and self.info[0]['Model'] in [0, 1, 2, 3]
+
+    def _is_AMDK6(self):
+        return self.is_AMD() and self.info[0]['Family']==5 \
+               and self.info[0]['Model'] in [6, 7]
+
+    def _is_AMDK6_2(self):
+        return self.is_AMD() and self.info[0]['Family']==5 \
+               and self.info[0]['Model']==8
+
+    def _is_AMDK6_3(self):
+        return self.is_AMD() and self.info[0]['Family']==5 \
+               and self.info[0]['Model']==9
+
+    def _is_AMDK7(self):
+        return self.is_AMD() and self.info[0]['Family'] == 6
+
+    # To reliably distinguish between the different types of AMD64 chips
+    # (Athlon64, Opteron, Athlon64 X2, Sempron, Turion 64, etc.) would
+    # require looking at the 'brand' from cpuid
+
+    def _is_AMD64(self):
+        return self.is_AMD() and self.info[0]['Family'] == 15
+
+    # Intel
+
+    def _is_Intel(self):
+        return self.info[0]['VendorIdentifier']=='GenuineIntel'
+
+    def _is_i386(self):
+        return self.info[0]['Family']==3
+
+    def _is_i486(self):
+        return self.info[0]['Family']==4
+
+    def _is_i586(self):
+        return self.is_Intel() and self.info[0]['Family']==5
+
+    def _is_i686(self):
+        return self.is_Intel() and self.info[0]['Family']==6
+
+    def _is_Pentium(self):
+        return self.is_Intel() and self.info[0]['Family']==5
+
+    def _is_PentiumMMX(self):
+        return self.is_Intel() and self.info[0]['Family']==5 \
+               and self.info[0]['Model']==4
+
+    def _is_PentiumPro(self):
+        return self.is_Intel() and self.info[0]['Family']==6 \
+               and self.info[0]['Model']==1
+
+    def _is_PentiumII(self):
+        return self.is_Intel() and self.info[0]['Family']==6 \
+               and self.info[0]['Model'] in [3, 5, 6]
+
+    def _is_PentiumIII(self):
+        return self.is_Intel() and self.info[0]['Family']==6 \
+               and self.info[0]['Model'] in [7, 8, 9, 10, 11]
+
+    def _is_PentiumIV(self):
+        return self.is_Intel() and self.info[0]['Family']==15
+
+    def _is_PentiumM(self):
+        return self.is_Intel() and self.info[0]['Family'] == 6 \
+               and self.info[0]['Model'] in [9, 13, 14]
+
+    def _is_Core2(self):
+        return self.is_Intel() and self.info[0]['Family'] == 6 \
+               and self.info[0]['Model'] in [15, 16, 17]
+
+    # Varia
+
+    def _is_singleCPU(self):
+        return len(self.info) == 1
+
+    def _getNCPUs(self):
+        return len(self.info)
+
+    def _has_mmx(self):
+        if self.is_Intel():
+            return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \
+                   or (self.info[0]['Family'] in [6, 15])
+        elif self.is_AMD():
+            return self.info[0]['Family'] in [5, 6, 15]
+        else:
+            return False
+
+    
def _has_sse(self): + if self.is_Intel(): + return ((self.info[0]['Family']==6 and + self.info[0]['Model'] in [7, 8, 9, 10, 11]) + or self.info[0]['Family']==15) + elif self.is_AMD(): + return ((self.info[0]['Family']==6 and + self.info[0]['Model'] in [6, 7, 8, 10]) + or self.info[0]['Family']==15) + else: + return False + + def _has_sse2(self): + if self.is_Intel(): + return self.is_Pentium4() or self.is_PentiumM() \ + or self.is_Core2() + elif self.is_AMD(): + return self.is_AMD64() + else: + return False + + def _has_3dnow(self): + return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15] + + def _has_3dnowext(self): + return self.is_AMD() and self.info[0]['Family'] in [6, 15] + +if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) + cpuinfo = LinuxCPUInfo +elif sys.platform.startswith('irix'): + cpuinfo = IRIXCPUInfo +elif sys.platform == 'darwin': + cpuinfo = DarwinCPUInfo +elif sys.platform.startswith('sunos'): + cpuinfo = SunOSCPUInfo +elif sys.platform.startswith('win32'): + cpuinfo = Win32CPUInfo +elif sys.platform.startswith('cygwin'): + cpuinfo = LinuxCPUInfo +#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices. +else: + cpuinfo = CPUInfoBase + +cpu = cpuinfo() + +#if __name__ == "__main__": +# +# cpu.is_blaa() +# cpu.is_Intel() +# cpu.is_Alpha() +# +# print('CPU information:'), +# for name in dir(cpuinfo): +# if name[0]=='_' and name[1]!='_': +# r = getattr(cpu,name[1:])() +# if r: +# if r!=1: +# print('%s=%s' %(name[1:],r)) +# else: +# print(name[1:]), +# print() diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/exec_command.py b/.venv/lib/python3.11/site-packages/numpy/distutils/exec_command.py new file mode 100644 index 0000000000000000000000000000000000000000..a67453abf624c8b256f5613afdc7b7546957bc19 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/exec_command.py @@ -0,0 +1,315 @@ +""" +exec_command + +Implements exec_command function that is (almost) equivalent to +commands.getstatusoutput function but on NT, DOS systems the +returned status is actually correct (though, the returned status +values may be different by a factor). In addition, exec_command +takes keyword arguments for (re-)defining environment variables. + +Provides functions: + + exec_command --- execute command in a specified directory and + in the modified environment. + find_executable --- locate a command using info from environment + variable PATH. Equivalent to posix `which` + command. + +Author: Pearu Peterson +Created: 11 January 2003 + +Requires: Python 2.x + +Successfully tested on: + +======== ============ ================================================= +os.name sys.platform comments +======== ============ ================================================= +posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3 + PyCrust 0.9.3, Idle 1.0.2 +posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2 +posix sunos5 SunOS 5.9, Python 2.2, 2.3.2 +posix darwin Darwin 7.2.0, Python 2.3 +nt win32 Windows Me + Python 2.3(EE), Idle 1.0, PyCrust 0.7.2 + Python 2.1.1 Idle 0.8 +nt win32 Windows 98, Python 2.1.1. Idle 0.8 +nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests + fail i.e. redefining environment variables may + not work. FIXED: don't use cygwin echo! + Comment: also `cmd /c echo` will not work + but redefining environment variables do work. 
+posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special) +nt win32 Windows XP, Python 2.3.3 +======== ============ ================================================= + +Known bugs: + +* Tests, that send messages to stderr, fail when executed from MSYS prompt + because the messages are lost at some point. + +""" +__all__ = ['exec_command', 'find_executable'] + +import os +import sys +import subprocess +import locale +import warnings + +from numpy.distutils.misc_util import is_sequence, make_temp_file +from numpy.distutils import log + +def filepath_from_subprocess_output(output): + """ + Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`. + + Inherited from `exec_command`, and possibly incorrect. + """ + mylocale = locale.getpreferredencoding(False) + if mylocale is None: + mylocale = 'ascii' + output = output.decode(mylocale, errors='replace') + output = output.replace('\r\n', '\n') + # Another historical oddity + if output[-1:] == '\n': + output = output[:-1] + return output + + +def forward_bytes_to_stdout(val): + """ + Forward bytes from a subprocess call to the console, without attempting to + decode them. + + The assumption is that the subprocess call already returned bytes in + a suitable encoding. + """ + if hasattr(sys.stdout, 'buffer'): + # use the underlying binary output if there is one + sys.stdout.buffer.write(val) + elif hasattr(sys.stdout, 'encoding'): + # round-trip the encoding if necessary + sys.stdout.write(val.decode(sys.stdout.encoding)) + else: + # make a best-guess at the encoding + sys.stdout.write(val.decode('utf8', errors='replace')) + + +def temp_file_name(): + # 2019-01-30, 1.17 + warnings.warn('temp_file_name is deprecated since NumPy v1.17, use ' + 'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1) + fo, name = make_temp_file() + fo.close() + return name + +def get_pythonexe(): + pythonexe = sys.executable + if os.name in ['nt', 'dos']: + fdir, fn = os.path.split(pythonexe) + fn = fn.upper().replace('PYTHONW', 'PYTHON') + pythonexe = os.path.join(fdir, fn) + assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,) + return pythonexe + +def find_executable(exe, path=None, _cache={}): + """Return full path of a executable or None. + + Symbolic links are not followed. 
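+
+    For example (illustrative; the result depends on the local PATH):
+
+    >>> find_executable('gcc')          # doctest: +SKIP
+    '/usr/bin/gcc'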
+ """ + key = exe, path + try: + return _cache[key] + except KeyError: + pass + log.debug('find_executable(%r)' % exe) + orig_exe = exe + + if path is None: + path = os.environ.get('PATH', os.defpath) + if os.name=='posix': + realpath = os.path.realpath + else: + realpath = lambda a:a + + if exe.startswith('"'): + exe = exe[1:-1] + + suffixes = [''] + if os.name in ['nt', 'dos', 'os2']: + fn, ext = os.path.splitext(exe) + extra_suffixes = ['.exe', '.com', '.bat'] + if ext.lower() not in extra_suffixes: + suffixes = extra_suffixes + + if os.path.isabs(exe): + paths = [''] + else: + paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ] + + for path in paths: + fn = os.path.join(path, exe) + for s in suffixes: + f_ext = fn+s + if not os.path.islink(f_ext): + f_ext = realpath(f_ext) + if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK): + log.info('Found executable %s' % f_ext) + _cache[key] = f_ext + return f_ext + + log.warn('Could not locate executable %s' % orig_exe) + return None + +############################################################ + +def _preserve_environment( names ): + log.debug('_preserve_environment(%r)' % (names)) + env = {name: os.environ.get(name) for name in names} + return env + +def _update_environment( **env ): + log.debug('_update_environment(...)') + for name, value in env.items(): + os.environ[name] = value or '' + +def exec_command(command, execute_in='', use_shell=None, use_tee=None, + _with_python = 1, **env ): + """ + Return (status,output) of executed command. + + .. deprecated:: 1.17 + Use subprocess.Popen instead + + Parameters + ---------- + command : str + A concatenated string of executable and arguments. + execute_in : str + Before running command ``cd execute_in`` and after ``cd -``. + use_shell : {bool, None}, optional + If True, execute ``sh -c command``. Default None (True) + use_tee : {bool, None}, optional + If True use tee. Default None (True) + + + Returns + ------- + res : str + Both stdout and stderr messages. + + Notes + ----- + On NT, DOS systems the returned status is correct for external commands. + Wild cards will not work for non-posix systems or when use_shell=0. + + """ + # 2019-01-30, 1.17 + warnings.warn('exec_command is deprecated since NumPy v1.17, use ' + 'subprocess.Popen instead', DeprecationWarning, stacklevel=1) + log.debug('exec_command(%r,%s)' % (command, + ','.join(['%s=%r'%kv for kv in env.items()]))) + + if use_tee is None: + use_tee = os.name=='posix' + if use_shell is None: + use_shell = os.name=='posix' + execute_in = os.path.abspath(execute_in) + oldcwd = os.path.abspath(os.getcwd()) + + if __name__[-12:] == 'exec_command': + exec_dir = os.path.dirname(os.path.abspath(__file__)) + elif os.path.isfile('exec_command.py'): + exec_dir = os.path.abspath('.') + else: + exec_dir = os.path.abspath(sys.argv[0]) + if os.path.isfile(exec_dir): + exec_dir = os.path.dirname(exec_dir) + + if oldcwd!=execute_in: + os.chdir(execute_in) + log.debug('New cwd: %s' % execute_in) + else: + log.debug('Retaining cwd: %s' % oldcwd) + + oldenv = _preserve_environment( list(env.keys()) ) + _update_environment( **env ) + + try: + st = _exec_command(command, + use_shell=use_shell, + use_tee=use_tee, + **env) + finally: + if oldcwd!=execute_in: + os.chdir(oldcwd) + log.debug('Restored cwd to %s' % oldcwd) + _update_environment(**oldenv) + + return st + + +def _exec_command(command, use_shell=None, use_tee = None, **env): + """ + Internal workhorse for exec_command(). 
+ """ + if use_shell is None: + use_shell = os.name=='posix' + if use_tee is None: + use_tee = os.name=='posix' + + if os.name == 'posix' and use_shell: + # On POSIX, subprocess always uses /bin/sh, override + sh = os.environ.get('SHELL', '/bin/sh') + if is_sequence(command): + command = [sh, '-c', ' '.join(command)] + else: + command = [sh, '-c', command] + use_shell = False + + elif os.name == 'nt' and is_sequence(command): + # On Windows, join the string for CreateProcess() ourselves as + # subprocess does it a bit differently + command = ' '.join(_quote_arg(arg) for arg in command) + + # Inherit environment by default + env = env or None + try: + # text is set to False so that communicate() + # will return bytes. We need to decode the output ourselves + # so that Python will not raise a UnicodeDecodeError when + # it encounters an invalid character; rather, we simply replace it + proc = subprocess.Popen(command, shell=use_shell, env=env, text=False, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + except OSError: + # Return 127, as os.spawn*() and /bin/sh do + return 127, '' + + text, err = proc.communicate() + mylocale = locale.getpreferredencoding(False) + if mylocale is None: + mylocale = 'ascii' + text = text.decode(mylocale, errors='replace') + text = text.replace('\r\n', '\n') + # Another historical oddity + if text[-1:] == '\n': + text = text[:-1] + + if use_tee and text: + print(text) + return proc.returncode, text + + +def _quote_arg(arg): + """ + Quote the argument for safe use in a shell command line. + """ + # If there is a quote in the string, assume relevants parts of the + # string are already quoted (e.g. '-I"C:\\Program Files\\..."') + if '"' not in arg and ' ' in arg: + return '"%s"' % arg + return arg + +############################################################ diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/extension.py b/.venv/lib/python3.11/site-packages/numpy/distutils/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..3ede013e0f3c6f2ed20690a7b2a260c2592ab3f4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/extension.py @@ -0,0 +1,107 @@ +"""distutils.extension + +Provides the Extension class, used to describe C/C++ extension +modules in setup scripts. + +Overridden to support f2py. + +""" +import re +from distutils.extension import Extension as old_Extension + + +cxx_ext_re = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match +fortran_pyf_ext_re = re.compile(r'.*\.(f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match + + +class Extension(old_Extension): + """ + Parameters + ---------- + name : str + Extension name. + sources : list of str + List of source file locations relative to the top directory of + the package. + extra_compile_args : list of str + Extra command line arguments to pass to the compiler. + extra_f77_compile_args : list of str + Extra command line arguments to pass to the fortran77 compiler. + extra_f90_compile_args : list of str + Extra command line arguments to pass to the fortran90 compiler. 
+ """ + def __init__( + self, name, sources, + include_dirs=None, + define_macros=None, + undef_macros=None, + library_dirs=None, + libraries=None, + runtime_library_dirs=None, + extra_objects=None, + extra_compile_args=None, + extra_link_args=None, + export_symbols=None, + swig_opts=None, + depends=None, + language=None, + f2py_options=None, + module_dirs=None, + extra_c_compile_args=None, + extra_cxx_compile_args=None, + extra_f77_compile_args=None, + extra_f90_compile_args=None,): + + old_Extension.__init__( + self, name, [], + include_dirs=include_dirs, + define_macros=define_macros, + undef_macros=undef_macros, + library_dirs=library_dirs, + libraries=libraries, + runtime_library_dirs=runtime_library_dirs, + extra_objects=extra_objects, + extra_compile_args=extra_compile_args, + extra_link_args=extra_link_args, + export_symbols=export_symbols) + + # Avoid assert statements checking that sources contains strings: + self.sources = sources + + # Python 2.4 distutils new features + self.swig_opts = swig_opts or [] + # swig_opts is assumed to be a list. Here we handle the case where it + # is specified as a string instead. + if isinstance(self.swig_opts, str): + import warnings + msg = "swig_opts is specified as a string instead of a list" + warnings.warn(msg, SyntaxWarning, stacklevel=2) + self.swig_opts = self.swig_opts.split() + + # Python 2.3 distutils new features + self.depends = depends or [] + self.language = language + + # numpy_distutils features + self.f2py_options = f2py_options or [] + self.module_dirs = module_dirs or [] + self.extra_c_compile_args = extra_c_compile_args or [] + self.extra_cxx_compile_args = extra_cxx_compile_args or [] + self.extra_f77_compile_args = extra_f77_compile_args or [] + self.extra_f90_compile_args = extra_f90_compile_args or [] + + return + + def has_cxx_sources(self): + for source in self.sources: + if cxx_ext_re(str(source)): + return True + return False + + def has_f2py_sources(self): + for source in self.sources: + if fortran_pyf_ext_re(source): + return True + return False + +# class Extension diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/from_template.py b/.venv/lib/python3.11/site-packages/numpy/distutils/from_template.py new file mode 100644 index 0000000000000000000000000000000000000000..90d1f4c384c7807c621eada8ed7685e5845c5c56 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/from_template.py @@ -0,0 +1,261 @@ +#!/usr/bin/env python3 +""" + +process_file(filename) + + takes templated file .xxx.src and produces .xxx file where .xxx + is .pyf .f90 or .f using the following template rules: + + '<..>' denotes a template. + + All function and subroutine blocks in a source file with names that + contain '<..>' will be replicated according to the rules in '<..>'. + + The number of comma-separated words in '<..>' will determine the number of + replicates. + + '<..>' may have two different forms, named and short. For example, + + named: + where anywhere inside a block '

<p>' will be replaced with
+  'd', 's', 'z', and 'c' for each replicate of the block (so the named
+  rule here is <p=d,s,z,c>).
+
+  <_c> is already defined: <_c=s,d,c,z>
+  <_t> is already defined: <_t=real,double precision,complex,double complex>
+
+  short:
+  <s,d,c,z>, a short form of the named, useful when no <p>
appears inside a block.
+
+  In general, '<..>' contains a comma separated list of arbitrary
+  expressions. If these expressions must contain a comma|leftarrow|rightarrow,
+  then prepend the comma|leftarrow|rightarrow with a backslash.
+
+  If an expression matches '\\<index>' then it will be replaced
+  by <index>-th expression.
+
+  Note that all '<..>' forms in a block must have the same number of
+  comma-separated entries.
+
+  Predefined named template rules:
+  <prefix=s,d,c,z>
+  <ftype=real,double precision,complex,double complex>
+  <ftypereal=real,double precision,\\0,\\1>
+  <ctype=float,double,complex_float,complex_double>
+  <ctypereal=float,double,\\0,\\1>
+
+"""
+__all__ = ['process_str', 'process_file']
+
+import os
+import sys
+import re
+
+routine_start_re = re.compile(r'(\n|\A)((     (\$|\*))|)\s*(subroutine|function)\b', re.I)
+routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
+function_start_re = re.compile(r'\n     (\$|\*)\s*function\b', re.I)
+
+def parse_structure(astr):
+    """ Return a list of tuples for each function or subroutine each
+    tuple is the start and end of a subroutine or function to be
+    expanded.
+    """
+
+    spanlist = []
+    ind = 0
+    while True:
+        m = routine_start_re.search(astr, ind)
+        if m is None:
+            break
+        start = m.start()
+        if function_start_re.match(astr, start, m.end()):
+            while True:
+                i = astr.rfind('\n', ind, start)
+                if i==-1:
+                    break
+                start = i
+                if astr[i:i+7]!='\n     $':
+                    break
+            start += 1
+        m = routine_end_re.search(astr, m.end())
+        ind = end = m and m.end()-1 or len(astr)
+        spanlist.append((start, end))
+    return spanlist
+
+template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
+named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
+list_re = re.compile(r"<\s*((.*?))\s*>")
+
+def find_repl_patterns(astr):
+    reps = named_re.findall(astr)
+    names = {}
+    for rep in reps:
+        name = rep[0].strip() or unique_key(names)
+        repl = rep[1].replace(r'\,', '@comma@')
+        thelist = conv(repl)
+        names[name] = thelist
+    return names
+
+def find_and_remove_repl_patterns(astr):
+    names = find_repl_patterns(astr)
+    astr = re.subn(named_re, '', astr)[0]
+    return astr, names
+
+item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
+def conv(astr):
+    b = astr.split(',')
+    l = [x.strip() for x in b]
+    for i in range(len(l)):
+        m = item_re.match(l[i])
+        if m:
+            j = int(m.group('index'))
+            l[i] = l[j]
+    return ','.join(l)
+
+def unique_key(adict):
+    """ Obtain a unique key given a dictionary."""
+    allkeys = list(adict.keys())
+    done = False
+    n = 1
+    while not done:
+        newkey = '__l%s' % (n)
+        if newkey in allkeys:
+            n += 1
+        else:
+            done = True
+    return newkey
+
+
+template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
+def expand_sub(substr, names):
+    substr = substr.replace(r'\>', '@rightarrow@')
+    substr = substr.replace(r'\<', '@leftarrow@')
+    lnames = find_repl_patterns(substr)
+    substr = named_re.sub(r"<\1>", substr)  # get rid of definition templates
+
+    def listrepl(mobj):
+        thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
+        if template_name_re.match(thelist):
+            return "<%s>" % (thelist)
+        name = None
+        for key in lnames.keys():    # see if list is already in dictionary
+            if lnames[key] == thelist:
+                name = key
+        if name is None:      # this list is not in the dictionary yet
+            name = unique_key(lnames)
+            lnames[name] = thelist
+        return "<%s>" % name
+
+    substr = list_re.sub(listrepl, substr)  # convert all lists to named templates
+                                            # newnames are constructed as needed
+
+    numsubs = None
+    base_rule = None
+    rules = {}
+    for r in template_re.findall(substr):
+        if r not in rules:
+            thelist = lnames.get(r, names.get(r, None))
+            if thelist is None:
+                raise ValueError('No replicates found for <%s>' % (r))
+            if r not in names and not thelist.startswith('_'):
+                names[r]
= thelist
+            rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
+            num = len(rule)
+
+            if numsubs is None:
+                numsubs = num
+                rules[r] = rule
+                base_rule = r
+            elif num == numsubs:
+                rules[r] = rule
+            else:
+                print("Mismatch in number of replacements (base <%s=%s>)"
+                      " for <%s=%s>. Ignoring." %
+                      (base_rule, ','.join(rules[base_rule]), r, thelist))
+    if not rules:
+        return substr
+
+    def namerepl(mobj):
+        name = mobj.group(1)
+        return rules.get(name, (k+1)*[name])[k]
+
+    newstr = ''
+    for k in range(numsubs):
+        newstr += template_re.sub(namerepl, substr) + '\n\n'
+
+    newstr = newstr.replace('@rightarrow@', '>')
+    newstr = newstr.replace('@leftarrow@', '<')
+    return newstr
+
+def process_str(allstr):
+    newstr = allstr
+    writestr = ''
+
+    struct = parse_structure(newstr)
+
+    oldend = 0
+    names = {}
+    names.update(_special_names)
+    for sub in struct:
+        cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]])
+        writestr += cleanedstr
+        names.update(defs)
+        writestr += expand_sub(newstr[sub[0]:sub[1]], names)
+        oldend = sub[1]
+    writestr += newstr[oldend:]
+
+    return writestr
+
+include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+\.src)['\"]", re.I)
+
+def resolve_includes(source):
+    d = os.path.dirname(source)
+    with open(source) as fid:
+        lines = []
+        for line in fid:
+            m = include_src_re.match(line)
+            if m:
+                fn = m.group('name')
+                if not os.path.isabs(fn):
+                    fn = os.path.join(d, fn)
+                if os.path.isfile(fn):
+                    lines.extend(resolve_includes(fn))
+                else:
+                    lines.append(line)
+            else:
+                lines.append(line)
+    return lines
+
+def process_file(source):
+    lines = resolve_includes(source)
+    return process_str(''.join(lines))
+
+_special_names = find_repl_patterns('''
+<_c=s,d,c,z>
+<_t=real,double precision,complex,double complex>
+<prefix=s,d,c,z>
+<ftype=real,double precision,complex,double complex>
+<ctype=float,double,complex_float,complex_double>
+<ftypereal=real,double precision,\\0,\\1>
+<ctypereal=float,double,\\0,\\1>
+''')
+
+def main():
+    try:
+        file = sys.argv[1]
+    except IndexError:
+        fid = sys.stdin
+        outfile = sys.stdout
+    else:
+        fid = open(file, 'r')
+        (base, ext) = os.path.splitext(file)
+        newname = base
+        outfile = open(newname, 'w')
+
+    allstr = fid.read()
+    writestr = process_str(allstr)
+    outfile.write(writestr)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/fujitsuccompiler.py b/.venv/lib/python3.11/site-packages/numpy/distutils/fujitsuccompiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..c25900b34f1ddad3274d9eca1fb4369b39f7437a
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/distutils/fujitsuccompiler.py
@@ -0,0 +1,28 @@
+from distutils.unixccompiler import UnixCCompiler
+
+class FujitsuCCompiler(UnixCCompiler):
+
+    """
+    Fujitsu compiler.
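+
+    Drives Fujitsu's ``fcc``/``FCC`` in clang mode (``-Nclang``), as wired
+    up via ``set_executables`` below.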
+ """ + + compiler_type = 'fujitsu' + cc_exe = 'fcc' + cxx_exe = 'FCC' + + def __init__(self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__(self, verbose, dry_run, force) + cc_compiler = self.cc_exe + cxx_compiler = self.cxx_exe + self.set_executables( + compiler=cc_compiler + + ' -O3 -Nclang -fPIC', + compiler_so=cc_compiler + + ' -O3 -Nclang -fPIC', + compiler_cxx=cxx_compiler + + ' -O3 -Nclang -fPIC', + linker_exe=cc_compiler + + ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared', + linker_so=cc_compiler + + ' -lfj90i -lfj90f -lfjsrcinfo -lelf -shared' + ) diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/intelccompiler.py b/.venv/lib/python3.11/site-packages/numpy/distutils/intelccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..0fa1c11dd6763ac4b673dd2c0e6ecd1aeed522f8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/intelccompiler.py @@ -0,0 +1,111 @@ +import platform + +from distutils.unixccompiler import UnixCCompiler +from numpy.distutils.exec_command import find_executable +from numpy.distutils.ccompiler import simple_version_match +if platform.system() == 'Windows': + from numpy.distutils.msvc9compiler import MSVCCompiler + + +class IntelCCompiler(UnixCCompiler): + """A modified Intel compiler compatible with a GCC-built Python.""" + compiler_type = 'intel' + cc_exe = 'icc' + cc_args = 'fPIC' + + def __init__(self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__(self, verbose, dry_run, force) + + v = self.get_version() + mpopt = 'openmp' if v and v < '15' else 'qopenmp' + self.cc_exe = ('icc -fPIC -fp-model strict -O3 ' + '-fomit-frame-pointer -{}').format(mpopt) + compiler = self.cc_exe + + if platform.system() == 'Darwin': + shared_flag = '-Wl,-undefined,dynamic_lookup' + else: + shared_flag = '-shared' + self.set_executables(compiler=compiler, + compiler_so=compiler, + compiler_cxx=compiler, + archiver='xiar' + ' cru', + linker_exe=compiler + ' -shared-intel', + linker_so=compiler + ' ' + shared_flag + + ' -shared-intel') + + +class IntelItaniumCCompiler(IntelCCompiler): + compiler_type = 'intele' + + # On Itanium, the Intel Compiler used to be called ecc, let's search for + # it (now it's also icc, so ecc is last in the search). + for cc_exe in map(find_executable, ['icc', 'ecc']): + if cc_exe: + break + + +class IntelEM64TCCompiler(UnixCCompiler): + """ + A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python. + """ + compiler_type = 'intelem' + cc_exe = 'icc -m64' + cc_args = '-fPIC' + + def __init__(self, verbose=0, dry_run=0, force=0): + UnixCCompiler.__init__(self, verbose, dry_run, force) + + v = self.get_version() + mpopt = 'openmp' if v and v < '15' else 'qopenmp' + self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 ' + '-fomit-frame-pointer -{}').format(mpopt) + compiler = self.cc_exe + + if platform.system() == 'Darwin': + shared_flag = '-Wl,-undefined,dynamic_lookup' + else: + shared_flag = '-shared' + self.set_executables(compiler=compiler, + compiler_so=compiler, + compiler_cxx=compiler, + archiver='xiar' + ' cru', + linker_exe=compiler + ' -shared-intel', + linker_so=compiler + ' ' + shared_flag + + ' -shared-intel') + + +if platform.system() == 'Windows': + class IntelCCompilerW(MSVCCompiler): + """ + A modified Intel compiler compatible with an MSVC-built Python. 
+ """ + compiler_type = 'intelw' + compiler_cxx = 'icl' + + def __init__(self, verbose=0, dry_run=0, force=0): + MSVCCompiler.__init__(self, verbose, dry_run, force) + version_match = simple_version_match(start=r'Intel\(R\).*?32,') + self.__version = version_match + + def initialize(self, plat_name=None): + MSVCCompiler.initialize(self, plat_name) + self.cc = self.find_exe('icl.exe') + self.lib = self.find_exe('xilib') + self.linker = self.find_exe('xilink') + self.compile_options = ['/nologo', '/O3', '/MD', '/W3', + '/Qstd=c99'] + self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', + '/Qstd=c99', '/Z7', '/D_DEBUG'] + + class IntelEM64TCCompilerW(IntelCCompilerW): + """ + A modified Intel x86_64 compiler compatible with + a 64bit MSVC-built Python. + """ + compiler_type = 'intelemw' + + def __init__(self, verbose=0, dry_run=0, force=0): + MSVCCompiler.__init__(self, verbose, dry_run, force) + version_match = simple_version_match(start=r'Intel\(R\).*?64,') + self.__version = version_match diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/lib2def.py b/.venv/lib/python3.11/site-packages/numpy/distutils/lib2def.py new file mode 100644 index 0000000000000000000000000000000000000000..851682c633109e4d8644d80bb501e5cafcd39d04 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/lib2def.py @@ -0,0 +1,116 @@ +import re +import sys +import subprocess + +__doc__ = """This module generates a DEF file from the symbols in +an MSVC-compiled DLL import library. It correctly discriminates between +data and functions. The data is collected from the output of the program +nm(1). + +Usage: + python lib2def.py [libname.lib] [output.def] +or + python lib2def.py [libname.lib] > output.def + +libname.lib defaults to python.lib and output.def defaults to stdout + +Author: Robert Kern +Last Update: April 30, 1999 +""" + +__version__ = '0.1a' + +py_ver = "%d%d" % tuple(sys.version_info[:2]) + +DEFAULT_NM = ['nm', '-Cs'] + +DEF_HEADER = """LIBRARY python%s.dll +;CODE PRELOAD MOVEABLE DISCARDABLE +;DATA PRELOAD SINGLE + +EXPORTS +""" % py_ver +# the header of the DEF file + +FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) +DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) + +def parse_cmd(): + """Parses the command-line arguments. + +libfile, deffile = parse_cmd()""" + if len(sys.argv) == 3: + if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': + libfile, deffile = sys.argv[1:] + elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': + deffile, libfile = sys.argv[1:] + else: + print("I'm assuming that your first argument is the library") + print("and the second is the DEF file.") + elif len(sys.argv) == 2: + if sys.argv[1][-4:] == '.def': + deffile = sys.argv[1] + libfile = 'python%s.lib' % py_ver + elif sys.argv[1][-4:] == '.lib': + deffile = None + libfile = sys.argv[1] + else: + libfile = 'python%s.lib' % py_ver + deffile = None + return libfile, deffile + +def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True): + """Returns the output of nm_cmd via a pipe. 
+ +nm_output = getnm(nm_cmd = 'nm -Cs py_lib')""" + p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, text=True) + nm_output, nm_err = p.communicate() + if p.returncode != 0: + raise RuntimeError('failed to run "%s": "%s"' % ( + ' '.join(nm_cmd), nm_err)) + return nm_output + +def parse_nm(nm_output): + """Returns a tuple of lists: dlist for the list of data +symbols and flist for the list of function symbols. + +dlist, flist = parse_nm(nm_output)""" + data = DATA_RE.findall(nm_output) + func = FUNC_RE.findall(nm_output) + + flist = [] + for sym in data: + if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): + flist.append(sym) + + dlist = [] + for sym in data: + if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): + dlist.append(sym) + + dlist.sort() + flist.sort() + return dlist, flist + +def output_def(dlist, flist, header, file = sys.stdout): + """Outputs the final DEF file to a file defaulting to stdout. + +output_def(dlist, flist, header, file = sys.stdout)""" + for data_sym in dlist: + header = header + '\t%s DATA\n' % data_sym + header = header + '\n' # blank line + for func_sym in flist: + header = header + '\t%s\n' % func_sym + file.write(header) + +if __name__ == '__main__': + libfile, deffile = parse_cmd() + if deffile is None: + deffile = sys.stdout + else: + deffile = open(deffile, 'w') + nm_cmd = DEFAULT_NM + [str(libfile)] + nm_output = getnm(nm_cmd, shell=False) + dlist, flist = parse_nm(nm_output) + output_def(dlist, flist, DEF_HEADER, deffile) diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/line_endings.py b/.venv/lib/python3.11/site-packages/numpy/distutils/line_endings.py new file mode 100644 index 0000000000000000000000000000000000000000..686e5ebd937fff16d5aa7f154d5c823ed17d9e0a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/line_endings.py @@ -0,0 +1,77 @@ +""" Functions for converting from DOS to UNIX line endings + +""" +import os +import re +import sys + + +def dos2unix(file): + "Replace CRLF with LF in argument files. Print names of changed files." + if os.path.isdir(file): + print(file, "Directory!") + return + + with open(file, "rb") as fp: + data = fp.read() + if '\0' in data: + print(file, "Binary!") + return + + newdata = re.sub("\r\n", "\n", data) + if newdata != data: + print('dos2unix:', file) + with open(file, "wb") as f: + f.write(newdata) + return file + else: + print(file, 'ok') + +def dos2unix_one_dir(modified_files, dir_name, file_names): + for file in file_names: + full_path = os.path.join(dir_name, file) + file = dos2unix(full_path) + if file is not None: + modified_files.append(file) + +def dos2unix_dir(dir_name): + modified_files = [] + os.path.walk(dir_name, dos2unix_one_dir, modified_files) + return modified_files +#---------------------------------- + +def unix2dos(file): + "Replace LF with CRLF in argument files. Print names of changed files." 
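+    # Note: normalizing any existing CRLF to LF first (first re.sub below)
+    # keeps the LF -> CRLF pass from turning a DOS ending into CR CR LF.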
+ if os.path.isdir(file): + print(file, "Directory!") + return + + with open(file, "rb") as fp: + data = fp.read() + if '\0' in data: + print(file, "Binary!") + return + newdata = re.sub("\r\n", "\n", data) + newdata = re.sub("\n", "\r\n", newdata) + if newdata != data: + print('unix2dos:', file) + with open(file, "wb") as f: + f.write(newdata) + return file + else: + print(file, 'ok') + +def unix2dos_one_dir(modified_files, dir_name, file_names): + for file in file_names: + full_path = os.path.join(dir_name, file) + unix2dos(full_path) + if file is not None: + modified_files.append(file) + +def unix2dos_dir(dir_name): + modified_files = [] + os.path.walk(dir_name, unix2dos_one_dir, modified_files) + return modified_files + +if __name__ == "__main__": + dos2unix_dir(sys.argv[1]) diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/log.py b/.venv/lib/python3.11/site-packages/numpy/distutils/log.py new file mode 100644 index 0000000000000000000000000000000000000000..3347f56d6fe95ebe5388de8d740ef4ddf8db317d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/log.py @@ -0,0 +1,111 @@ +# Colored log +import sys +from distutils.log import * # noqa: F403 +from distutils.log import Log as old_Log +from distutils.log import _global_log + +from numpy.distutils.misc_util import (red_text, default_text, cyan_text, + green_text, is_sequence, is_string) + + +def _fix_args(args,flag=1): + if is_string(args): + return args.replace('%', '%%') + if flag and is_sequence(args): + return tuple([_fix_args(a, flag=0) for a in args]) + return args + + +class Log(old_Log): + def _log(self, level, msg, args): + if level >= self.threshold: + if args: + msg = msg % _fix_args(args) + if 0: + if msg.startswith('copying ') and msg.find(' -> ') != -1: + return + if msg.startswith('byte-compiling '): + return + print(_global_color_map[level](msg)) + sys.stdout.flush() + + def good(self, msg, *args): + """ + If we log WARN messages, log this message as a 'nice' anti-warn + message. + + """ + if WARN >= self.threshold: + if args: + print(green_text(msg % _fix_args(args))) + else: + print(green_text(msg)) + sys.stdout.flush() + + +_global_log.__class__ = Log + +good = _global_log.good + +def set_threshold(level, force=False): + prev_level = _global_log.threshold + if prev_level > DEBUG or force: + # If we're running at DEBUG, don't change the threshold, as there's + # likely a good reason why we're running at this level. + _global_log.threshold = level + if level <= DEBUG: + info('set_threshold: setting threshold to DEBUG level,' + ' it can be changed only with force argument') + else: + info('set_threshold: not changing threshold from DEBUG level' + ' %s to %s' % (prev_level, level)) + return prev_level + +def get_threshold(): + return _global_log.threshold + +def set_verbosity(v, force=False): + prev_level = _global_log.threshold + if v < 0: + set_threshold(ERROR, force) + elif v == 0: + set_threshold(WARN, force) + elif v == 1: + set_threshold(INFO, force) + elif v >= 2: + set_threshold(DEBUG, force) + return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level, 1) + + +_global_color_map = { + DEBUG:cyan_text, + INFO:default_text, + WARN:red_text, + ERROR:red_text, + FATAL:red_text +} + +# don't use INFO,.. flags in set_verbosity, these flags are for set_threshold. 
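+# Usage sketch: set_verbosity(-1) selects ERROR, 0 WARN, 1 INFO, >=2 DEBUG;
+# the previous level is returned on the same -2..2 scale, e.g.
+#
+#   prev = set_verbosity(2)            # make DEBUG records visible
+#   ...
+#   set_verbosity(prev, force=True)    # force is needed to leave DEBUG again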
+set_verbosity(0, force=True) + + +_error = error +_warn = warn +_info = info +_debug = debug + + +def error(msg, *a, **kw): + _error(f"ERROR: {msg}", *a, **kw) + + +def warn(msg, *a, **kw): + _warn(f"WARN: {msg}", *a, **kw) + + +def info(msg, *a, **kw): + _info(f"INFO: {msg}", *a, **kw) + + +def debug(msg, *a, **kw): + _debug(f"DEBUG: {msg}", *a, **kw) diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/mingw32ccompiler.py b/.venv/lib/python3.11/site-packages/numpy/distutils/mingw32ccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..4763f41ad326d464355fd82ceccb019e1e55edf0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/mingw32ccompiler.py @@ -0,0 +1,591 @@ +""" +Support code for building Python extensions on Windows. + + # NT stuff + # 1. Make sure libpython.a exists for gcc. If not, build it. + # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) + # 3. Force windows to use g77 + +""" +import os +import sys +import subprocess +import re +import textwrap + +# Overwrite certain distutils.ccompiler functions: +import numpy.distutils.ccompiler # noqa: F401 +from numpy.distutils import log +# NT stuff +# 1. Make sure libpython.a exists for gcc. If not, build it. +# 2. Force windows to use gcc (we're struggling with MSVC and g77 support) +# --> this is done in numpy/distutils/ccompiler.py +# 3. Force windows to use g77 + +import distutils.cygwinccompiler +from distutils.unixccompiler import UnixCCompiler +from distutils.msvccompiler import get_build_version as get_build_msvc_version +from distutils.errors import UnknownFileError +from numpy.distutils.misc_util import (msvc_runtime_library, + msvc_runtime_version, + msvc_runtime_major, + get_build_architecture) + +def get_msvcr_replacement(): + """Replacement for outdated version of get_msvcr from cygwinccompiler""" + msvcr = msvc_runtime_library() + return [] if msvcr is None else [msvcr] + + +# Useful to generate table of symbols from a dll +_START = re.compile(r'\[Ordinal/Name Pointer\] Table') +_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') + +# the same as cygwin plus some additional parameters +class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): + """ A modified MingW32 compiler compatible with an MSVC built Python. + + """ + + compiler_type = 'mingw32' + + def __init__ (self, + verbose=0, + dry_run=0, + force=0): + + distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose, + dry_run, force) + + # **changes: eric jones 4/11/01 + # 1. Check for import library on Windows. Build if it doesn't exist. + + build_import_library() + + # Check for custom msvc runtime library on Windows. Build if it doesn't exist. + msvcr_success = build_msvcr_library() + msvcr_dbg_success = build_msvcr_library(debug=True) + if msvcr_success or msvcr_dbg_success: + # add preprocessor statement for using customized msvcr lib + self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') + + # Define the MSVC version as hint for MinGW + msvcr_version = msvc_runtime_version() + if msvcr_version: + self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version) + + # MS_WIN64 should be defined when building for amd64 on windows, + # but python headers define it only for MS compilers, which has all + # kind of bad consequences, like using Py_ModuleInit4 instead of + # Py_ModuleInit4_64, etc... 
So we add it here + if get_build_architecture() == 'AMD64': + self.set_executables( + compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', + compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall ' + '-Wstrict-prototypes', + linker_exe='gcc -g', + linker_so='gcc -g -shared') + else: + self.set_executables( + compiler='gcc -O2 -Wall', + compiler_so='gcc -O2 -Wall -Wstrict-prototypes', + linker_exe='g++ ', + linker_so='g++ -shared') + # added for python2.3 support + # we can't pass it through set_executables because pre 2.2 would fail + self.compiler_cxx = ['g++'] + + # Maybe we should also append -mthreads, but then the finished dlls + # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support + # thread-safe exception handling on `Mingw32') + + # no additional libraries needed + #self.dll_libraries=[] + return + + # __init__ () + + def link(self, + target_desc, + objects, + output_filename, + output_dir, + libraries, + library_dirs, + runtime_library_dirs, + export_symbols = None, + debug=0, + extra_preargs=None, + extra_postargs=None, + build_temp=None, + target_lang=None): + # Include the appropriate MSVC runtime library if Python was built + # with MSVC >= 7.0 (MinGW standard is msvcrt) + runtime_library = msvc_runtime_library() + if runtime_library: + if not libraries: + libraries = [] + libraries.append(runtime_library) + args = (self, + target_desc, + objects, + output_filename, + output_dir, + libraries, + library_dirs, + runtime_library_dirs, + None, #export_symbols, we do this in our def-file + debug, + extra_preargs, + extra_postargs, + build_temp, + target_lang) + func = UnixCCompiler.link + func(*args[:func.__code__.co_argcount]) + return + + def object_filenames (self, + source_filenames, + strip_dir=0, + output_dir=''): + if output_dir is None: output_dir = '' + obj_names = [] + for src_name in source_filenames: + # use normcase to make sure '.rc' is really '.rc' and not '.RC' + (base, ext) = os.path.splitext (os.path.normcase(src_name)) + + # added these lines to strip off windows drive letters + # without it, .o files are placed next to .c files + # instead of the build directory + drv, base = os.path.splitdrive(base) + if drv: + base = base[1:] + + if ext not in (self.src_extensions + ['.rc', '.res']): + raise UnknownFileError( + "unknown file type '%s' (from '%s')" % \ + (ext, src_name)) + if strip_dir: + base = os.path.basename (base) + if ext == '.res' or ext == '.rc': + # these need to be compiled to object files + obj_names.append (os.path.join (output_dir, + base + ext + self.obj_extension)) + else: + obj_names.append (os.path.join (output_dir, + base + self.obj_extension)) + return obj_names + + # object_filenames () + + +def find_python_dll(): + # We can't do much here: + # - find it in the virtualenv (sys.prefix) + # - find it in python main dir (sys.base_prefix, if in a virtualenv) + # - in system32, + # - ortherwise (Sxs), I don't know how to get it. 
+ stems = [sys.prefix] + if sys.base_prefix != sys.prefix: + stems.append(sys.base_prefix) + + sub_dirs = ['', 'lib', 'bin'] + # generate possible combinations of directory trees and sub-directories + lib_dirs = [] + for stem in stems: + for folder in sub_dirs: + lib_dirs.append(os.path.join(stem, folder)) + + # add system directory as well + if 'SYSTEMROOT' in os.environ: + lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32')) + + # search in the file system for possible candidates + major_version, minor_version = tuple(sys.version_info[:2]) + implementation = sys.implementation.name + if implementation == 'cpython': + dllname = f'python{major_version}{minor_version}.dll' + elif implementation == 'pypy': + dllname = f'libpypy{major_version}.{minor_version}-c.dll' + else: + dllname = f'Unknown platform {implementation}' + print("Looking for %s" % dllname) + for folder in lib_dirs: + dll = os.path.join(folder, dllname) + if os.path.exists(dll): + return dll + + raise ValueError("%s not found in %s" % (dllname, lib_dirs)) + +def dump_table(dll): + st = subprocess.check_output(["objdump.exe", "-p", dll]) + return st.split(b'\n') + +def generate_def(dll, dfile): + """Given a dll file location, get all its exported symbols and dump them + into the given def file. + + The .def file will be overwritten""" + dump = dump_table(dll) + for i in range(len(dump)): + if _START.match(dump[i].decode()): + break + else: + raise ValueError("Symbol table not found") + + syms = [] + for j in range(i+1, len(dump)): + m = _TABLE.match(dump[j].decode()) + if m: + syms.append((int(m.group(1).strip()), m.group(2))) + else: + break + + if len(syms) == 0: + log.warn('No symbols found in %s' % dll) + + with open(dfile, 'w') as d: + d.write('LIBRARY %s\n' % os.path.basename(dll)) + d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') + d.write(';DATA PRELOAD SINGLE\n') + d.write('\nEXPORTS\n') + for s in syms: + #d.write('@%d %s\n' % (s[0], s[1])) + d.write('%s\n' % s[1]) + +def find_dll(dll_name): + + arch = {'AMD64' : 'amd64', + 'Intel' : 'x86'}[get_build_architecture()] + + def _find_dll_in_winsxs(dll_name): + # Walk through the WinSxS directory to find the dll. + winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'), + 'winsxs') + if not os.path.exists(winsxs_path): + return None + for root, dirs, files in os.walk(winsxs_path): + if dll_name in files and arch in root: + return os.path.join(root, dll_name) + return None + + def _find_dll_in_path(dll_name): + # First, look in the Python directory, then scan PATH for + # the given dll name. + for path in [sys.prefix] + os.environ['PATH'].split(';'): + filepath = os.path.join(path, dll_name) + if os.path.exists(filepath): + return os.path.abspath(filepath) + + return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name) + +def build_msvcr_library(debug=False): + if os.name != 'nt': + return False + + # If the version number is None, then we couldn't find the MSVC runtime at + # all, because we are running on a Python distribution which is customed + # compiled; trust that the compiler is the same as the one available to us + # now, and that it is capable of linking with the correct runtime without + # any extra options. 
+ msvcr_ver = msvc_runtime_major() + if msvcr_ver is None: + log.debug('Skip building import library: ' + 'Runtime is not compiled with MSVC') + return False + + # Skip using a custom library for versions < MSVC 8.0 + if msvcr_ver < 80: + log.debug('Skip building msvcr library:' + ' custom functionality not present') + return False + + msvcr_name = msvc_runtime_library() + if debug: + msvcr_name += 'd' + + # Skip if custom library already exists + out_name = "lib%s.a" % msvcr_name + out_file = os.path.join(sys.prefix, 'libs', out_name) + if os.path.isfile(out_file): + log.debug('Skip building msvcr library: "%s" exists' % + (out_file,)) + return True + + # Find the msvcr dll + msvcr_dll_name = msvcr_name + '.dll' + dll_file = find_dll(msvcr_dll_name) + if not dll_file: + log.warn('Cannot build msvcr library: "%s" not found' % + msvcr_dll_name) + return False + + def_name = "lib%s.def" % msvcr_name + def_file = os.path.join(sys.prefix, 'libs', def_name) + + log.info('Building msvcr library: "%s" (from %s)' \ + % (out_file, dll_file)) + + # Generate a symbol definition file from the msvcr dll + generate_def(dll_file, def_file) + + # Create a custom mingw library for the given symbol definitions + cmd = ['dlltool', '-d', def_file, '-l', out_file] + retcode = subprocess.call(cmd) + + # Clean up symbol definitions + os.remove(def_file) + + return (not retcode) + +def build_import_library(): + if os.name != 'nt': + return + + arch = get_build_architecture() + if arch == 'AMD64': + return _build_import_library_amd64() + elif arch == 'Intel': + return _build_import_library_x86() + else: + raise ValueError("Unhandled arch %s" % arch) + +def _check_for_import_lib(): + """Check if an import library for the Python runtime already exists.""" + major_version, minor_version = tuple(sys.version_info[:2]) + + # patterns for the file name of the library itself + patterns = ['libpython%d%d.a', + 'libpython%d%d.dll.a', + 'libpython%d.%d.dll.a'] + + # directory trees that may contain the library + stems = [sys.prefix] + if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: + stems.append(sys.base_prefix) + elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: + stems.append(sys.real_prefix) + + # possible subdirectories within those trees where it is placed + sub_dirs = ['libs', 'lib'] + + # generate a list of candidate locations + candidates = [] + for pat in patterns: + filename = pat % (major_version, minor_version) + for stem_dir in stems: + for folder in sub_dirs: + candidates.append(os.path.join(stem_dir, folder, filename)) + + # test the filesystem to see if we can find any of these + for fullname in candidates: + if os.path.isfile(fullname): + # already exists, in location given + return (True, fullname) + + # needs to be built, preferred location given first + return (False, candidates[0]) + +def _build_import_library_amd64(): + out_exists, out_file = _check_for_import_lib() + if out_exists: + log.debug('Skip building import library: "%s" exists', out_file) + return + + # get the runtime dll for which we are building import library + dll_file = find_python_dll() + log.info('Building import library (arch=AMD64): "%s" (from %s)' % + (out_file, dll_file)) + + # generate symbol list from this library + def_name = "python%d%d.def" % tuple(sys.version_info[:2]) + def_file = os.path.join(sys.prefix, 'libs', def_name) + generate_def(dll_file, def_file) + + # generate import library from this symbol list + cmd = ['dlltool', '-d', def_file, '-l', out_file] + subprocess.check_call(cmd) 
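+# _build_import_library_amd64 above and _build_import_library_x86 below share
+# one recipe: locate the python DLL (or .lib), dump its exported symbols into
+# a .def file, then have dlltool emit a GNU import library, roughly:
+#
+#   dlltool --dllname python3XY.dll --def python3XY.def --output-lib libpython3XY.dll.a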
+ +def _build_import_library_x86(): + """ Build the import libraries for Mingw32-gcc on Windows + """ + out_exists, out_file = _check_for_import_lib() + if out_exists: + log.debug('Skip building import library: "%s" exists', out_file) + return + + lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) + lib_file = os.path.join(sys.prefix, 'libs', lib_name) + if not os.path.isfile(lib_file): + # didn't find library file in virtualenv, try base distribution, too, + # and use that instead if found there. for Python 2.7 venvs, the base + # directory is in attribute real_prefix instead of base_prefix. + if hasattr(sys, 'base_prefix'): + base_lib = os.path.join(sys.base_prefix, 'libs', lib_name) + elif hasattr(sys, 'real_prefix'): + base_lib = os.path.join(sys.real_prefix, 'libs', lib_name) + else: + base_lib = '' # os.path.isfile('') == False + + if os.path.isfile(base_lib): + lib_file = base_lib + else: + log.warn('Cannot build import library: "%s" not found', lib_file) + return + log.info('Building import library (ARCH=x86): "%s"', out_file) + + from numpy.distutils import lib2def + + def_name = "python%d%d.def" % tuple(sys.version_info[:2]) + def_file = os.path.join(sys.prefix, 'libs', def_name) + nm_output = lib2def.getnm( + lib2def.DEFAULT_NM + [lib_file], shell=False) + dlist, flist = lib2def.parse_nm(nm_output) + with open(def_file, 'w') as fid: + lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid) + + dll_name = find_python_dll () + + cmd = ["dlltool", + "--dllname", dll_name, + "--def", def_file, + "--output-lib", out_file] + status = subprocess.check_output(cmd) + if status: + log.warn('Failed to build import library for gcc. Linking will fail.') + return + +#===================================== +# Dealing with Visual Studio MANIFESTS +#===================================== + +# Functions to deal with visual studio manifests. Manifest are a mechanism to +# enforce strong DLL versioning on windows, and has nothing to do with +# distutils MANIFEST. manifests are XML files with version info, and used by +# the OS loader; they are necessary when linking against a DLL not in the +# system path; in particular, official python 2.6 binary is built against the +# MS runtime 9 (the one from VS 2008), which is not available on most windows +# systems; python 2.6 installer does install it in the Win SxS (Side by side) +# directory, but this requires the manifest for this to work. This is a big +# mess, thanks MS for a wonderful system. + +# XXX: ideally, we should use exactly the same version as used by python. I +# submitted a patch to get this version, but it was only included for python +# 2.6.1 and above. So for versions below, we use a "best guess". +_MSVCRVER_TO_FULLVER = {} +if sys.platform == 'win32': + try: + import msvcrt + # I took one version in my SxS directory: no idea if it is the good + # one, and we can't retrieve it from python + _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42" + _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" + # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 + # on Windows XP: + _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460" + crt_ver = getattr(msvcrt, 'CRT_ASSEMBLY_VERSION', None) + if crt_ver is not None: # Available at least back to Python 3.3 + maj, min = re.match(r'(\d+)\.(\d)', crt_ver).groups() + _MSVCRVER_TO_FULLVER[maj + min] = crt_ver + del maj, min + del crt_ver + except ImportError: + # If we are here, means python was not built with MSVC. 
Not sure what + # to do in that case: manifest building will fail, but it should not be + # used in that case anyway + log.warn('Cannot import msvcrt: using manifest will not be possible') + +def msvc_manifest_xml(maj, min): + """Given a major and minor version of the MSVCR, returns the + corresponding XML file.""" + try: + fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)] + except KeyError: + raise ValueError("Version %d,%d of MSVCRT not supported yet" % + (maj, min)) from None + # Don't be fooled, it looks like XML, but it is not. In particular, it + # should not have any space before starting, and its size should be + # divisible by 4, most likely for alignment constraints when the xml is + # embedded in the binary... + # This template was copied directly from the python 2.6 binary (using + # strings.exe from mingw on python.exe). + template = textwrap.dedent("""\ + <?xml version="1.0" encoding="UTF-8" standalone="yes"?> + <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0"> + <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3"> + <security> + <requestedPrivileges> + <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel> + </requestedPrivileges> + </security> + </trustInfo> + <dependency> + <dependentAssembly> + <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity> + </dependentAssembly> + </dependency> + </assembly>""") + + return template % {'fullver': fullver, 'maj': maj, 'min': min} + +def manifest_rc(name, type='dll'): + """Return the rc file used to generate the res file which will be embedded + as a manifest for the given manifest file name, of the given type ('dll' or + 'exe'). + + Parameters + ---------- + name : str + name of the manifest file to embed + type : str {'dll', 'exe'} + type of the binary which will embed the manifest + + """ + if type == 'dll': + rctype = 2 + elif type == 'exe': + rctype = 1 + else: + raise ValueError("Type %s not supported" % type) + + return """\ +#include "winuser.h" +%d RT_MANIFEST %s""" % (rctype, name) + +def check_embedded_msvcr_match_linked(msver): + """msver is the ms runtime version used for the MANIFEST.""" + # check that the linked and embedded msvcr major versions are + # the same + maj = msvc_runtime_major() + if maj: + if not maj == int(msver): + raise ValueError( + "Discrepancy between linked msvcr " \ + "(%d) and the one about to be embedded " \ + "(%d)" % (int(msver), maj)) + +def configtest_name(config): + base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) + return os.path.splitext(base)[0] + +def manifest_name(config): + # Get configtest name (including suffix) + root = configtest_name(config) + exext = config.compiler.exe_extension + return root + exext + ".manifest" + +def rc_name(config): + # Get configtest name (including suffix) + root = configtest_name(config) + return root + ".rc" + +def generate_manifest(config): + msver = get_build_msvc_version() + if msver is not None: + if msver >= 8: + check_embedded_msvcr_match_linked(msver) + ma_str, mi_str = str(msver).split('.') + # Write the manifest file + manxml = msvc_manifest_xml(int(ma_str), int(mi_str)) + with open(manifest_name(config), "w") as man: + config.temp_files.append(manifest_name(config)) + man.write(manxml) diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/misc_util.py b/.venv/lib/python3.11/site-packages/numpy/distutils/misc_util.py new file mode 100644 index 0000000000000000000000000000000000000000..e226b47448153e34487def3176d5991319312363 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/misc_util.py @@ -0,0 +1,2493 @@ +import os +import re +import sys +import copy +import glob +import atexit +import tempfile +import subprocess +import shutil +import multiprocessing +import textwrap +import importlib.util +from threading import local as tlocal +from functools import reduce + +import distutils +from distutils.errors import DistutilsError + +# stores temporary directory of each thread to only create one per thread +_tdata = tlocal() + +# 
store all created temporary directories so they can be deleted on exit +_tmpdirs = [] +def clean_up_temporary_directory(): + if _tmpdirs is not None: + for d in _tmpdirs: + try: + shutil.rmtree(d) + except OSError: + pass + +atexit.register(clean_up_temporary_directory) + +__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict', + 'dict_append', 'appendpath', 'generate_config_py', + 'get_cmd', 'allpath', 'get_mathlibs', + 'terminal_has_colors', 'red_text', 'green_text', 'yellow_text', + 'blue_text', 'cyan_text', 'cyg2win32', 'mingw32', 'all_strings', + 'has_f_sources', 'has_cxx_sources', 'filter_sources', + 'get_dependencies', 'is_local_src_dir', 'get_ext_source_files', + 'get_script_files', 'get_lib_source_files', 'get_data_files', + 'dot_join', 'get_frame', 'minrelpath', 'njoin', + 'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language', + 'get_build_architecture', 'get_info', 'get_pkg_info', + 'get_num_build_jobs', 'sanitize_cxx_flags', + 'exec_mod_from_location'] + +class InstallableLib: + """ + Container to hold information on an installable library. + + Parameters + ---------- + name : str + Name of the installed library. + build_info : dict + Dictionary holding build information. + target_dir : str + Absolute path specifying where to install the library. + + See Also + -------- + Configuration.add_installed_library + + Notes + ----- + The three parameters are stored as attributes with the same names. + + """ + def __init__(self, name, build_info, target_dir): + self.name = name + self.build_info = build_info + self.target_dir = target_dir + + +def get_num_build_jobs(): + """ + Get the number of parallel build jobs set by the --parallel command line + argument of setup.py. If the command did not receive a setting, the + environment variable NPY_NUM_BUILD_JOBS is checked. If that is unset, + return the number of processors on the system, with a maximum of 8 (to + prevent overloading the system if there are a lot of CPUs). + + Returns + ------- + out : int + number of parallel jobs that can be run + + """ + from numpy.distutils.core import get_distribution + try: + cpu_count = len(os.sched_getaffinity(0)) + except AttributeError: + cpu_count = multiprocessing.cpu_count() + cpu_count = min(cpu_count, 8) + envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", cpu_count)) + dist = get_distribution() + # may be None during configuration + if dist is None: + return envjobs + + # any of these three may have the job set, take the largest + cmdattr = (getattr(dist.get_command_obj('build'), 'parallel', None), + getattr(dist.get_command_obj('build_ext'), 'parallel', None), + getattr(dist.get_command_obj('build_clib'), 'parallel', None)) + if all(x is None for x in cmdattr): + return envjobs + else: + return max(x for x in cmdattr if x is not None) + +def quote_args(args): + """Quote list of arguments. + + .. deprecated:: 1.22. + """ + import warnings + warnings.warn('"quote_args" is deprecated.', + DeprecationWarning, stacklevel=2) + # don't use _nt_quote_args as it does not check whether + # args items already have quotes or not. + args = list(args) + for i in range(len(args)): + a = args[i] + if ' ' in a and a[0] not in '"\'': + args[i] = '"%s"' % (a) + return args + +def allpath(name): + "Convert a /-separated pathname to one using the OS's path separator."
+ split = name.split('/') + return os.path.join(*split) + +def rel_path(path, parent_path): + """Return path relative to parent_path.""" + # Use realpath to avoid issues with symlinked dirs (see gh-7707) + pd = os.path.realpath(os.path.abspath(parent_path)) + apath = os.path.realpath(os.path.abspath(path)) + if len(apath) < len(pd): + return path + if apath == pd: + return '' + if pd == apath[:len(pd)]: + assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)])) + path = apath[len(pd)+1:] + return path + +def get_path_from_frame(frame, parent_path=None): + """Return path of the module given a frame object from the call stack. + + Returned path is relative to parent_path when given, + otherwise it is absolute path. + """ + + # First, try to find if the file name is in the frame. + try: + caller_file = eval('__file__', frame.f_globals, frame.f_locals) + d = os.path.dirname(os.path.abspath(caller_file)) + except NameError: + # __file__ is not defined, so let's try __name__. We try this second + # because setuptools spoofs __name__ to be '__main__' even though + # sys.modules['__main__'] might be something else, like easy_install(1). + caller_name = eval('__name__', frame.f_globals, frame.f_locals) + __import__(caller_name) + mod = sys.modules[caller_name] + if hasattr(mod, '__file__'): + d = os.path.dirname(os.path.abspath(mod.__file__)) + else: + # we're probably running setup.py as execfile("setup.py") + # (likely we're building an egg) + d = os.path.abspath('.') + + if parent_path is not None: + d = rel_path(d, parent_path) + + return d or '.' + +def njoin(*path): + """Join two or more pathname components + + - convert a /-separated pathname to one using the OS's path separator. + - resolve `..` and `.` from path. + + Either passing n arguments as in njoin('a','b'), or a sequence + of n names as in njoin(['a','b']) is handled, or a mixture of such arguments. + """ + paths = [] + for p in path: + if is_sequence(p): + # njoin(['a', 'b'], 'c') + paths.append(njoin(*p)) + else: + assert is_string(p) + paths.append(p) + path = paths + if not path: + # njoin() + joined = '' + else: + # njoin('a', 'b') + joined = os.path.join(*path) + if os.path.sep != '/': + joined = joined.replace('/', os.path.sep) + return minrelpath(joined) + +def get_mathlibs(path=None): + """Return the MATHLIB line from numpyconfig.h + """ + if path is not None: + config_file = os.path.join(path, '_numpyconfig.h') + else: + # Look for the file in each of the numpy include directories. + dirs = get_numpy_include_dirs() + for path in dirs: + fn = os.path.join(path, '_numpyconfig.h') + if os.path.exists(fn): + config_file = fn + break + else: + raise DistutilsError('_numpyconfig.h not found in numpy include ' + 'dirs %r' % (dirs,)) + + with open(config_file) as fid: + mathlibs = [] + s = '#define MATHLIB' + for line in fid: + if line.startswith(s): + value = line[len(s):].strip() + if value: + mathlibs.extend(value.split(',')) + return mathlibs + +def minrelpath(path): + """Resolve `..` and '.' from path. + """ + if not is_string(path): + return path + if '.' 
not in path: + return path + l = path.split(os.sep) + while l: + try: + i = l.index('.', 1) + except ValueError: + break + del l[i] + j = 1 + while l: + try: + i = l.index('..', j) + except ValueError: + break + if l[i-1]=='..': + j += 1 + else: + del l[i], l[i-1] + j = 1 + if not l: + return '' + return os.sep.join(l) + +def sorted_glob(fileglob): + """sorts output of python glob for https://bugs.python.org/issue30461 + to allow extensions to have reproducible build results""" + return sorted(glob.glob(fileglob)) + +def _fix_paths(paths, local_path, include_non_existing): + assert is_sequence(paths), repr(type(paths)) + new_paths = [] + assert not is_string(paths), repr(paths) + for n in paths: + if is_string(n): + if '*' in n or '?' in n: + p = sorted_glob(n) + p2 = sorted_glob(njoin(local_path, n)) + if p2: + new_paths.extend(p2) + elif p: + new_paths.extend(p) + else: + if include_non_existing: + new_paths.append(n) + print('could not resolve pattern in %r: %r' % + (local_path, n)) + else: + n2 = njoin(local_path, n) + if os.path.exists(n2): + new_paths.append(n2) + else: + if os.path.exists(n): + new_paths.append(n) + elif include_non_existing: + new_paths.append(n) + if not os.path.exists(n): + print('non-existing path in %r: %r' % + (local_path, n)) + + elif is_sequence(n): + new_paths.extend(_fix_paths(n, local_path, include_non_existing)) + else: + new_paths.append(n) + return [minrelpath(p) for p in new_paths] + +def gpaths(paths, local_path='', include_non_existing=True): + """Apply glob to paths and prepend local_path if needed. + """ + if is_string(paths): + paths = (paths,) + return _fix_paths(paths, local_path, include_non_existing) + +def make_temp_file(suffix='', prefix='', text=True): + if not hasattr(_tdata, 'tempdir'): + _tdata.tempdir = tempfile.mkdtemp() + _tmpdirs.append(_tdata.tempdir) + fid, name = tempfile.mkstemp(suffix=suffix, + prefix=prefix, + dir=_tdata.tempdir, + text=text) + fo = os.fdopen(fid, 'w') + return fo, name + +# Hooks for colored terminal output. 
+# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle +def terminal_has_colors(): + if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ: + # Avoid importing curses that causes illegal operation + # with a message: + # PYTHON2 caused an invalid page fault in + # module CYGNURSES7.DLL as 015f:18bbfc28 + # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)] + # ssh to Win32 machine from debian + # curses.version is 2.2 + # CYGWIN_98-4.10, release 1.5.7(0.109/3/2)) + return 0 + if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty(): + try: + import curses + curses.setupterm() + if (curses.tigetnum("colors") >= 0 + and curses.tigetnum("pairs") >= 0 + and ((curses.tigetstr("setf") is not None + and curses.tigetstr("setb") is not None) + or (curses.tigetstr("setaf") is not None + and curses.tigetstr("setab") is not None) + or curses.tigetstr("scp") is not None)): + return 1 + except Exception: + pass + return 0 + +if terminal_has_colors(): + _colour_codes = dict(black=0, red=1, green=2, yellow=3, + blue=4, magenta=5, cyan=6, white=7, default=9) + def colour_text(s, fg=None, bg=None, bold=False): + seq = [] + if bold: + seq.append('1') + if fg: + fgcode = 30 + _colour_codes.get(fg.lower(), 0) + seq.append(str(fgcode)) + if bg: + bgcode = 40 + _colour_codes.get(bg.lower(), 7) + seq.append(str(bgcode)) + if seq: + return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s) + else: + return s +else: + def colour_text(s, fg=None, bg=None): + return s + +def default_text(s): + return colour_text(s, 'default') +def red_text(s): + return colour_text(s, 'red') +def green_text(s): + return colour_text(s, 'green') +def yellow_text(s): + return colour_text(s, 'yellow') +def cyan_text(s): + return colour_text(s, 'cyan') +def blue_text(s): + return colour_text(s, 'blue') + +######################### + +def cyg2win32(path: str) -> str: + """Convert a path from Cygwin-native to Windows-native. + + Uses the cygpath utility (part of the Base install) to do the + actual conversion. Falls back to returning the original path if + this fails. + + Handles the default ``/cygdrive`` mount prefix as well as the + ``/proc/cygdrive`` portable prefix, custom cygdrive prefixes such + as ``/`` or ``/mnt``, and absolute paths such as ``/usr/src/`` or + ``/home/username`` + + Parameters + ---------- + path : str + The path to convert + + Returns + ------- + converted_path : str + The converted path + + Notes + ----- + Documentation for cygpath utility: + https://cygwin.com/cygwin-ug-net/cygpath.html + Documentation for the C function it wraps: + https://cygwin.com/cygwin-api/func-cygwin-conv-path.html + + """ + if sys.platform != "cygwin": + return path + return subprocess.check_output( + ["/usr/bin/cygpath", "--windows", path], text=True + ) + + +def mingw32(): + """Return true when using mingw32 environment. 
+ """ + if sys.platform=='win32': + if os.environ.get('OSTYPE', '')=='msys': + return True + if os.environ.get('MSYSTEM', '')=='MINGW32': + return True + return False + +def msvc_runtime_version(): + "Return version of MSVC runtime library, as defined by __MSC_VER__ macro" + msc_pos = sys.version.find('MSC v.') + if msc_pos != -1: + msc_ver = int(sys.version[msc_pos+6:msc_pos+10]) + else: + msc_ver = None + return msc_ver + +def msvc_runtime_library(): + "Return name of MSVC runtime library if Python was built with MSVC >= 7" + ver = msvc_runtime_major () + if ver: + if ver < 140: + return "msvcr%i" % ver + else: + return "vcruntime%i" % ver + else: + return None + +def msvc_runtime_major(): + "Return major version of MSVC runtime coded like get_build_msvc_version" + major = {1300: 70, # MSVC 7.0 + 1310: 71, # MSVC 7.1 + 1400: 80, # MSVC 8 + 1500: 90, # MSVC 9 (aka 2008) + 1600: 100, # MSVC 10 (aka 2010) + 1900: 140, # MSVC 14 (aka 2015) + }.get(msvc_runtime_version(), None) + return major + +######################### + +#XXX need support for .C that is also C++ +cxx_ext_match = re.compile(r'.*\.(cpp|cxx|cc)\Z', re.I).match +fortran_ext_match = re.compile(r'.*\.(f90|f95|f77|for|ftn|f)\Z', re.I).match +f90_ext_match = re.compile(r'.*\.(f90|f95)\Z', re.I).match +f90_module_name_match = re.compile(r'\s*module\s*(?P[\w_]+)', re.I).match +def _get_f90_modules(source): + """Return a list of Fortran f90 module names that + given source file defines. + """ + if not f90_ext_match(source): + return [] + modules = [] + with open(source) as f: + for line in f: + m = f90_module_name_match(line) + if m: + name = m.group('name') + modules.append(name) + # break # XXX can we assume that there is one module per file? + return modules + +def is_string(s): + return isinstance(s, str) + +def all_strings(lst): + """Return True if all items in lst are string objects. """ + for item in lst: + if not is_string(item): + return False + return True + +def is_sequence(seq): + if is_string(seq): + return False + try: + len(seq) + except Exception: + return False + return True + +def is_glob_pattern(s): + return is_string(s) and ('*' in s or '?' in s) + +def as_list(seq): + if is_sequence(seq): + return list(seq) + else: + return [seq] + +def get_language(sources): + # not used in numpy/scipy packages, use build_ext.detect_language instead + """Determine language value (c,f77,f90) from sources """ + language = None + for source in sources: + if isinstance(source, str): + if f90_ext_match(source): + language = 'f90' + break + elif fortran_ext_match(source): + language = 'f77' + return language + +def has_f_sources(sources): + """Return True if sources contains Fortran files """ + for source in sources: + if fortran_ext_match(source): + return True + return False + +def has_cxx_sources(sources): + """Return True if sources contains C++ files """ + for source in sources: + if cxx_ext_match(source): + return True + return False + +def filter_sources(sources): + """Return four lists of filenames containing + C, C++, Fortran, and Fortran 90 module sources, + respectively. 
+ """ + c_sources = [] + cxx_sources = [] + f_sources = [] + fmodule_sources = [] + for source in sources: + if fortran_ext_match(source): + modules = _get_f90_modules(source) + if modules: + fmodule_sources.append(source) + else: + f_sources.append(source) + elif cxx_ext_match(source): + cxx_sources.append(source) + else: + c_sources.append(source) + return c_sources, cxx_sources, f_sources, fmodule_sources + + +def _get_headers(directory_list): + # get *.h files from list of directories + headers = [] + for d in directory_list: + head = sorted_glob(os.path.join(d, "*.h")) #XXX: *.hpp files?? + headers.extend(head) + return headers + +def _get_directories(list_of_sources): + # get unique directories from list of sources. + direcs = [] + for f in list_of_sources: + d = os.path.split(f) + if d[0] != '' and not d[0] in direcs: + direcs.append(d[0]) + return direcs + +def _commandline_dep_string(cc_args, extra_postargs, pp_opts): + """ + Return commandline representation used to determine if a file needs + to be recompiled + """ + cmdline = 'commandline: ' + cmdline += ' '.join(cc_args) + cmdline += ' '.join(extra_postargs) + cmdline += ' '.join(pp_opts) + '\n' + return cmdline + + +def get_dependencies(sources): + #XXX scan sources for include statements + return _get_headers(_get_directories(sources)) + +def is_local_src_dir(directory): + """Return true if directory is local directory. + """ + if not is_string(directory): + return False + abs_dir = os.path.abspath(directory) + c = os.path.commonprefix([os.getcwd(), abs_dir]) + new_dir = abs_dir[len(c):].split(os.sep) + if new_dir and not new_dir[0]: + new_dir = new_dir[1:] + if new_dir and new_dir[0]=='build': + return False + new_dir = os.sep.join(new_dir) + return os.path.isdir(new_dir) + +def general_source_files(top_path): + pruned_directories = {'CVS':1, '.svn':1, 'build':1} + prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') + for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): + pruned = [ d for d in dirnames if d not in pruned_directories ] + dirnames[:] = pruned + for f in filenames: + if not prune_file_pat.search(f): + yield os.path.join(dirpath, f) + +def general_source_directories_files(top_path): + """Return a directory name relative to top_path and + files contained. + """ + pruned_directories = ['CVS', '.svn', 'build'] + prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$') + for dirpath, dirnames, filenames in os.walk(top_path, topdown=True): + pruned = [ d for d in dirnames if d not in pruned_directories ] + dirnames[:] = pruned + for d in dirnames: + dpath = os.path.join(dirpath, d) + rpath = rel_path(dpath, top_path) + files = [] + for f in os.listdir(dpath): + fn = os.path.join(dpath, f) + if os.path.isfile(fn) and not prune_file_pat.search(fn): + files.append(fn) + yield rpath, files + dpath = top_path + rpath = rel_path(dpath, top_path) + filenames = [os.path.join(dpath, f) for f in os.listdir(dpath) \ + if not prune_file_pat.search(f)] + files = [f for f in filenames if os.path.isfile(f)] + yield rpath, files + + +def get_ext_source_files(ext): + # Get sources and any include files in the same directory. 
+ filenames = [] + sources = [_m for _m in ext.sources if is_string(_m)] + filenames.extend(sources) + filenames.extend(get_dependencies(sources)) + for d in ext.depends: + if is_local_src_dir(d): + filenames.extend(list(general_source_files(d))) + elif os.path.isfile(d): + filenames.append(d) + return filenames + +def get_script_files(scripts): + scripts = [_m for _m in scripts if is_string(_m)] + return scripts + +def get_lib_source_files(lib): + filenames = [] + sources = lib[1].get('sources', []) + sources = [_m for _m in sources if is_string(_m)] + filenames.extend(sources) + filenames.extend(get_dependencies(sources)) + depends = lib[1].get('depends', []) + for d in depends: + if is_local_src_dir(d): + filenames.extend(list(general_source_files(d))) + elif os.path.isfile(d): + filenames.append(d) + return filenames + +def get_shared_lib_extension(is_python_ext=False): + """Return the correct file extension for shared libraries. + + Parameters + ---------- + is_python_ext : bool, optional + Whether the shared library is a Python extension. Default is False. + + Returns + ------- + so_ext : str + The shared library extension. + + Notes + ----- + For Python shared libs, `so_ext` will typically be '.so' on Linux and OS X, + and '.pyd' on Windows. For Python >= 3.2 `so_ext` has a tag prepended on + POSIX systems according to PEP 3149. + + """ + confvars = distutils.sysconfig.get_config_vars() + so_ext = confvars.get('EXT_SUFFIX', '') + + if not is_python_ext: + # hardcode known values, config vars (including SHLIB_SUFFIX) are + # unreliable (see #3182) + # darwin, windows and debug linux are wrong in 3.3.1 and older + if (sys.platform.startswith('linux') or + sys.platform.startswith('gnukfreebsd')): + so_ext = '.so' + elif sys.platform.startswith('darwin'): + so_ext = '.dylib' + elif sys.platform.startswith('win'): + so_ext = '.dll' + else: + # fall back to config vars for unknown platforms + # fix long extension for Python >=3.2, see PEP 3149. + if 'SOABI' in confvars: + # Does nothing unless SOABI config var exists + so_ext = so_ext.replace('.' + confvars.get('SOABI'), '', 1) + + return so_ext + +def get_data_files(data): + if is_string(data): + return [data] + sources = data[1] + filenames = [] + for s in sources: + if hasattr(s, '__call__'): + continue + if is_local_src_dir(s): + filenames.extend(list(general_source_files(s))) + elif is_string(s): + if os.path.isfile(s): + filenames.append(s) + else: + print('Not existing data file:', s) + else: + raise TypeError(repr(s)) + return filenames + +def dot_join(*args): + return '.'.join([a for a in args if a]) + +def get_frame(level=0): + """Return frame object from call stack with given level. + """ + try: + return sys._getframe(level+1) + except AttributeError: + frame = sys.exc_info()[2].tb_frame + for _ in range(level+1): + frame = frame.f_back + return frame + + +###################### + +class Configuration: + + _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs', + 'libraries', 'headers', 'scripts', 'py_modules', + 'installed_libraries', 'define_macros'] + _dict_keys = ['package_dir', 'installed_pkg_config'] + _extra_keys = ['name', 'version'] + + numpy_include_dirs = [] + + def __init__(self, + package_name=None, + parent_name=None, + top_path=None, + package_path=None, + caller_level=1, + setup_name='setup.py', + **attrs): + """Construct configuration instance of a package. 
+ + package_name -- name of the package + Ex.: 'distutils' + parent_name -- name of the parent package + Ex.: 'numpy' + top_path -- directory of the toplevel package + Ex.: the directory where the numpy package source sits + package_path -- directory of package. Will be computed by magic from the + directory of the caller module if not specified + Ex.: the directory where numpy.distutils is + caller_level -- frame level to caller namespace, internal parameter. + """ + self.name = dot_join(parent_name, package_name) + self.version = None + + caller_frame = get_frame(caller_level) + self.local_path = get_path_from_frame(caller_frame, top_path) + # local_path -- directory of a file (usually setup.py) that + # defines a configuration() function. + if top_path is None: + top_path = self.local_path + self.local_path = '' + if package_path is None: + package_path = self.local_path + elif os.path.isdir(njoin(self.local_path, package_path)): + package_path = njoin(self.local_path, package_path) + if not os.path.isdir(package_path or '.'): + raise ValueError("%r is not a directory" % (package_path,)) + self.top_path = top_path + self.package_path = package_path + # this is the relative path in the installed package + self.path_in_package = os.path.join(*self.name.split('.')) + + self.list_keys = self._list_keys[:] + self.dict_keys = self._dict_keys[:] + + for n in self.list_keys: + v = copy.copy(attrs.get(n, [])) + setattr(self, n, as_list(v)) + + for n in self.dict_keys: + v = copy.copy(attrs.get(n, {})) + setattr(self, n, v) + + known_keys = self.list_keys + self.dict_keys + self.extra_keys = self._extra_keys[:] + for n in attrs.keys(): + if n in known_keys: + continue + a = attrs[n] + setattr(self, n, a) + if isinstance(a, list): + self.list_keys.append(n) + elif isinstance(a, dict): + self.dict_keys.append(n) + else: + self.extra_keys.append(n) + + if os.path.exists(njoin(package_path, '__init__.py')): + self.packages.append(self.name) + self.package_dir[self.name] = package_path + + self.options = dict( + ignore_setup_xxx_py = False, + assume_default_configuration = False, + delegate_options_to_subpackages = False, + quiet = False, + ) + + caller_instance = None + for i in range(1, 3): + try: + f = get_frame(i) + except ValueError: + break + try: + caller_instance = eval('self', f.f_globals, f.f_locals) + break + except NameError: + pass + if isinstance(caller_instance, self.__class__): + if caller_instance.options['delegate_options_to_subpackages']: + self.set_options(**caller_instance.options) + + self.setup_name = setup_name + + def todict(self): + """ + Return a dictionary compatible with the keyword arguments of distutils + setup function. + + Examples + -------- + >>> setup(**config.todict()) #doctest: +SKIP + """ + + self._optimize_data_files() + d = {} + known_keys = self.list_keys + self.dict_keys + self.extra_keys + for n in known_keys: + a = getattr(self, n) + if a: + d[n] = a + return d + + def info(self, message): + if not self.options['quiet']: + print(message) + + def warn(self, message): + sys.stderr.write('Warning: %s\n' % (message,)) + + def set_options(self, **options): + """ + Configure Configuration instance.
+ + The following options are available: + - ignore_setup_xxx_py + - assume_default_configuration + - delegate_options_to_subpackages + - quiet + + """ + for key, value in options.items(): + if key in self.options: + self.options[key] = value + else: + raise ValueError('Unknown option: '+key) + + def get_distribution(self): + """Return the distutils distribution object for self.""" + from numpy.distutils.core import get_distribution + return get_distribution() + + def _wildcard_get_subpackage(self, subpackage_name, + parent_name, + caller_level = 1): + l = subpackage_name.split('.') + subpackage_path = njoin([self.local_path]+l) + dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)] + config_list = [] + for d in dirs: + if not os.path.isfile(njoin(d, '__init__.py')): + continue + if 'build' in d.split(os.sep): + continue + n = '.'.join(d.split(os.sep)[-len(l):]) + c = self.get_subpackage(n, + parent_name = parent_name, + caller_level = caller_level+1) + config_list.extend(c) + return config_list + + def _get_configuration_from_setup_py(self, setup_py, + subpackage_name, + subpackage_path, + parent_name, + caller_level = 1): + # In case setup_py imports local modules: + sys.path.insert(0, os.path.dirname(setup_py)) + try: + setup_name = os.path.splitext(os.path.basename(setup_py))[0] + n = dot_join(self.name, subpackage_name, setup_name) + setup_module = exec_mod_from_location( + '_'.join(n.split('.')), setup_py) + if not hasattr(setup_module, 'configuration'): + if not self.options['assume_default_configuration']: + self.warn('Assuming default configuration '\ + '(%s does not define configuration())'\ + % (setup_module)) + config = Configuration(subpackage_name, parent_name, + self.top_path, subpackage_path, + caller_level = caller_level + 1) + else: + pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1])) + args = (pn,) + if setup_module.configuration.__code__.co_argcount > 1: + args = args + (self.top_path,) + config = setup_module.configuration(*args) + if config.name!=dot_join(parent_name, subpackage_name): + self.warn('Subpackage %r configuration returned as %r' % \ + (dot_join(parent_name, subpackage_name), config.name)) + finally: + del sys.path[0] + return config + + def get_subpackage(self,subpackage_name, + subpackage_path=None, + parent_name=None, + caller_level = 1): + """Return list of subpackage configurations. + + Parameters + ---------- + subpackage_name : str or None + Name of the subpackage to get the configuration. '*' in + subpackage_name is handled as a wildcard. + subpackage_path : str + If None, then the path is assumed to be the local path plus the + subpackage_name. If a setup.py file is not found in the + subpackage_path, then a default configuration is used. + parent_name : str + Parent name. 
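+ + Examples + -------- + A hypothetical call, following the #doctest: +SKIP convention used + elsewhere in this file ('*' expands to every child directory that + contains an __init__.py; the name below is a placeholder): + + >>> config.get_subpackage('*') #doctest: +SKIP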
+ """ + if subpackage_name is None: + if subpackage_path is None: + raise ValueError( + "either subpackage_name or subpackage_path must be specified") + subpackage_name = os.path.basename(subpackage_path) + + # handle wildcards + l = subpackage_name.split('.') + if subpackage_path is None and '*' in subpackage_name: + return self._wildcard_get_subpackage(subpackage_name, + parent_name, + caller_level = caller_level+1) + assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name)) + if subpackage_path is None: + subpackage_path = njoin([self.local_path] + l) + else: + subpackage_path = njoin([subpackage_path] + l[:-1]) + subpackage_path = self.paths([subpackage_path])[0] + setup_py = njoin(subpackage_path, self.setup_name) + if not self.options['ignore_setup_xxx_py']: + if not os.path.isfile(setup_py): + setup_py = njoin(subpackage_path, + 'setup_%s.py' % (subpackage_name)) + if not os.path.isfile(setup_py): + if not self.options['assume_default_configuration']: + self.warn('Assuming default configuration '\ + '(%s/{setup_%s,setup}.py was not found)' \ + % (os.path.dirname(setup_py), subpackage_name)) + config = Configuration(subpackage_name, parent_name, + self.top_path, subpackage_path, + caller_level = caller_level+1) + else: + config = self._get_configuration_from_setup_py( + setup_py, + subpackage_name, + subpackage_path, + parent_name, + caller_level = caller_level + 1) + if config: + return [config] + else: + return [] + + def add_subpackage(self,subpackage_name, + subpackage_path=None, + standalone = False): + """Add a sub-package to the current Configuration instance. + + This is useful in a setup.py script for adding sub-packages to a + package. + + Parameters + ---------- + subpackage_name : str + name of the subpackage + subpackage_path : str + if given, the subpackage path such as the subpackage is in + subpackage_path / subpackage_name. If None,the subpackage is + assumed to be located in the local path / subpackage_name. + standalone : bool + """ + + if standalone: + parent_name = None + else: + parent_name = self.name + config_list = self.get_subpackage(subpackage_name, subpackage_path, + parent_name = parent_name, + caller_level = 2) + if not config_list: + self.warn('No configuration returned, assuming unavailable.') + for config in config_list: + d = config + if isinstance(config, Configuration): + d = config.todict() + assert isinstance(d, dict), repr(type(d)) + + self.info('Appending %s configuration to %s' \ + % (d.get('name'), self.name)) + self.dict_append(**d) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add a subpackage '+ subpackage_name) + + def add_data_dir(self, data_path): + """Recursively add files under data_path to data_files list. + + Recursively add files under data_path to the list of data_files to be + installed (and distributed). The data_path can be either a relative + path-name, or an absolute path-name, or a 2-tuple where the first + argument shows where in the install directory the data directory + should be installed to. + + Parameters + ---------- + data_path : seq or str + Argument can be either + + * 2-sequence (, ) + * path to data directory where python datadir suffix defaults + to package dir. 
+ + Notes + ----- + Rules for installation paths:: + + foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar + (gun, foo/bar) -> parent/gun + foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b + (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun + (gun/*, foo/*) -> parent/gun/a, parent/gun/b + /foo/bar -> (bar, /foo/bar) -> parent/bar + (gun, /foo/bar) -> parent/gun + (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar + + Examples + -------- + For example suppose the source directory contains fun/foo.dat and + fun/bar/car.dat: + + >>> self.add_data_dir('fun') #doctest: +SKIP + >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP + >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP + + Will install data-files to the locations:: + + <package install directory>/ + fun/ + foo.dat + bar/ + car.dat + sun/ + foo.dat + bar/ + car.dat + gun/ + foo.dat + car.dat + + """ + if is_sequence(data_path): + d, data_path = data_path + else: + d = None + if is_sequence(data_path): + [self.add_data_dir((d, p)) for p in data_path] + return + if not is_string(data_path): + raise TypeError("not a string: %r" % (data_path,)) + if d is None: + if os.path.isabs(data_path): + return self.add_data_dir((os.path.basename(data_path), data_path)) + return self.add_data_dir((data_path, data_path)) + paths = self.paths(data_path, include_non_existing=False) + if is_glob_pattern(data_path): + if is_glob_pattern(d): + pattern_list = allpath(d).split(os.sep) + pattern_list.reverse() + # /a/*//b/ -> /a/*/b + rl = list(range(len(pattern_list)-1)); rl.reverse() + for i in rl: + if not pattern_list[i]: + del pattern_list[i] + # + for path in paths: + if not os.path.isdir(path): + print('Not a directory, skipping', path) + continue + rpath = rel_path(path, self.local_path) + path_list = rpath.split(os.sep) + path_list.reverse() + target_list = [] + i = 0 + for s in pattern_list: + if is_glob_pattern(s): + if i>=len(path_list): + raise ValueError('cannot fill pattern %r with %r' \ + % (d, path)) + target_list.append(path_list[i]) + else: + assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath)) + target_list.append(s) + i += 1 + if path_list[i:]: + self.warn('mismatch of pattern_list=%s and path_list=%s'\ + % (pattern_list, path_list)) + target_list.reverse() + self.add_data_dir((os.sep.join(target_list), path)) + else: + for path in paths: + self.add_data_dir((d, path)) + return + assert not is_glob_pattern(d), repr(d) + + dist = self.get_distribution() + if dist is not None and dist.data_files is not None: + data_files = dist.data_files + else: + data_files = self.data_files + + for path in paths: + for d1, f in list(general_source_directories_files(path)): + target_path = os.path.join(self.path_in_package, d, d1) + data_files.append((target_path, f)) + + def _optimize_data_files(self): + data_dict = {} + for p, files in self.data_files: + if p not in data_dict: + data_dict[p] = set() + for f in files: + data_dict[p].add(f) + self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()] + + def add_data_files(self,*files): + """Add data files to configuration data_files. + + Parameters + ---------- + files : sequence + Argument(s) can be either + + * 2-sequence (<datadir prefix>, <path to data file(s)>) + * paths to data files where python datadir prefix defaults + to package dir. + + Notes + ----- + The form of each element of the files sequence is very flexible + allowing many combinations of where to get the files from the package + and where they should ultimately be installed on the system.
The most + basic usage is for an element of the files argument sequence to be a + simple filename. This will cause that file from the local path to be + installed to the installation path of the self.name package (package + path). The file argument can also be a relative path in which case the + entire relative path will be installed into the package directory. + Finally, the file can be an absolute path name in which case the file + will be found at the absolute path name but installed to the package + path. + + This basic behavior can be augmented by passing a 2-tuple in as the + file argument. The first element of the tuple should specify the + relative path (under the package install directory) where the + remaining sequence of files should be installed to (it has nothing to + do with the file-names in the source distribution). The second element + of the tuple is the sequence of files that should be installed. The + files in this sequence can be filenames, relative paths, or absolute + paths. For absolute paths the file will be installed in the top-level + package installation directory (regardless of the first argument). + Filenames and relative path names will be installed in the package + install directory under the path name given as the first element of + the tuple. + + Rules for installation paths: + + #. file.txt -> (., file.txt)-> parent/file.txt + #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt + #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt + #. ``*``.txt -> parent/a.txt, parent/b.txt + #. ``foo/*.txt`` -> parent/foo/a.txt, parent/foo/b.txt + #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt + #. (sun, file.txt) -> parent/sun/file.txt + #. (sun, bar/file.txt) -> parent/sun/file.txt + #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt + #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt + #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt + #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt + + An additional feature is that the path to a data-file can actually be + a function that takes no arguments and returns the actual path(s) to + the data-files. This is useful when the data files are generated while + building the package. + + Examples + -------- + Add files to the list of data_files to be included with the package. + + >>> self.add_data_files('foo.dat', + ... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']), + ... 'bar/cat.dat', + ... '/full/path/to/can.dat') #doctest: +SKIP + + will install these data files to:: + + <package install directory>/ + foo.dat + fun/ + gun.dat + nun/ + pun.dat + sun.dat + bar/ + cat.dat + can.dat + + where <package install directory> is the package (or sub-package) + directory such as '/usr/lib/python2.4/site-packages/mypackage' + ('C:\\Python2.4\\Lib\\site-packages\\mypackage') or + '/usr/lib/python2.4/site-packages/mypackage/mysubpackage' + ('C:\\Python2.4\\Lib\\site-packages\\mypackage\\mysubpackage').
+ """ + + if len(files)>1: + for f in files: + self.add_data_files(f) + return + assert len(files)==1 + if is_sequence(files[0]): + d, files = files[0] + else: + d = None + if is_string(files): + filepat = files + elif is_sequence(files): + if len(files)==1: + filepat = files[0] + else: + for f in files: + self.add_data_files((d, f)) + return + else: + raise TypeError(repr(type(files))) + + if d is None: + if hasattr(filepat, '__call__'): + d = '' + elif os.path.isabs(filepat): + d = '' + else: + d = os.path.dirname(filepat) + self.add_data_files((d, files)) + return + + paths = self.paths(filepat, include_non_existing=False) + if is_glob_pattern(filepat): + if is_glob_pattern(d): + pattern_list = d.split(os.sep) + pattern_list.reverse() + for path in paths: + path_list = path.split(os.sep) + path_list.reverse() + path_list.pop() # filename + target_list = [] + i = 0 + for s in pattern_list: + if is_glob_pattern(s): + target_list.append(path_list[i]) + i += 1 + else: + target_list.append(s) + target_list.reverse() + self.add_data_files((os.sep.join(target_list), path)) + else: + self.add_data_files((d, paths)) + return + assert not is_glob_pattern(d), repr((d, filepat)) + + dist = self.get_distribution() + if dist is not None and dist.data_files is not None: + data_files = dist.data_files + else: + data_files = self.data_files + + data_files.append((os.path.join(self.path_in_package, d), paths)) + + ### XXX Implement add_py_modules + + def add_define_macros(self, macros): + """Add define macros to configuration + + Add the given sequence of macro name and value duples to the beginning + of the define_macros list This list will be visible to all extension + modules of the current package. + """ + dist = self.get_distribution() + if dist is not None: + if not hasattr(dist, 'define_macros'): + dist.define_macros = [] + dist.define_macros.extend(macros) + else: + self.define_macros.extend(macros) + + + def add_include_dirs(self,*paths): + """Add paths to configuration include directories. + + Add the given sequence of paths to the beginning of the include_dirs + list. This list will be visible to all extension modules of the + current package. + """ + include_dirs = self.paths(paths) + dist = self.get_distribution() + if dist is not None: + if dist.include_dirs is None: + dist.include_dirs = [] + dist.include_dirs.extend(include_dirs) + else: + self.include_dirs.extend(include_dirs) + + def add_headers(self,*files): + """Add installable headers to configuration. + + Add the given sequence of files to the beginning of the headers list. + By default, headers will be installed under // directory. If an item of files + is a tuple, then its first argument specifies the actual installation + location relative to the path. + + Parameters + ---------- + files : str or seq + Argument(s) can be either: + + * 2-sequence (,) + * path(s) to header file(s) where python includedir suffix will + default to package name. + """ + headers = [] + for path in files: + if is_string(path): + [headers.append((self.name, p)) for p in self.paths(path)] + else: + if not isinstance(path, (tuple, list)) or len(path) != 2: + raise TypeError(repr(path)) + [headers.append((path[0], p)) for p in self.paths(path[1])] + dist = self.get_distribution() + if dist is not None: + if dist.headers is None: + dist.headers = [] + dist.headers.extend(headers) + else: + self.headers.extend(headers) + + def paths(self,*paths,**kws): + """Apply glob to paths and prepend local_path if needed. + + Applies glob.glob(...) 
to each path in the sequence (if needed) and + prepends the local_path if needed. Because this is called on all + source lists, this allows wildcard characters to be specified in lists + of sources for extension modules and libraries and scripts and allows + path-names to be relative to the source directory. + + """ + include_non_existing = kws.get('include_non_existing', True) + return gpaths(paths, + local_path = self.local_path, + include_non_existing=include_non_existing) + + def _fix_paths_dict(self, kw): + for k in kw.keys(): + v = kw[k] + if k in ['sources', 'depends', 'include_dirs', 'library_dirs', + 'module_dirs', 'extra_objects']: + new_v = self.paths(v) + kw[k] = new_v + + def add_extension(self,name,sources,**kw): + """Add extension to configuration. + + Create and add an Extension instance to the ext_modules list. This + method also takes the following optional keyword arguments that are + passed on to the Extension constructor. + + Parameters + ---------- + name : str + name of the extension + sources : seq + list of the sources. The list of sources may contain functions + (called source generators) which must take an extension instance + and a build directory as inputs and return a source file or list of + source files or None. If None is returned then no sources are + generated. If the Extension instance has no sources after + processing all source generators, then no extension module is + built. + include_dirs : + define_macros : + undef_macros : + library_dirs : + libraries : + runtime_library_dirs : + extra_objects : + extra_compile_args : + extra_link_args : + extra_f77_compile_args : + extra_f90_compile_args : + export_symbols : + swig_opts : + depends : + The depends list contains paths to files or directories that the + sources of the extension module depend on. If any path in the + depends list is newer than the extension module, then the module + will be rebuilt. + language : + f2py_options : + module_dirs : + extra_info : dict or list + dict or list of dict of keywords to be appended to keywords. + + Notes + ----- + The self.paths(...) method is applied to all lists that may contain + paths.
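+ + Examples + -------- + A minimal, illustrative sketch; the extension name and source file are + placeholders, not files from this package: + + >>> config.add_extension('_example', + ... sources=['_examplemodule.c'], + ... libraries=['m']) #doctest: +SKIP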
+ """ + ext_args = copy.copy(kw) + ext_args['name'] = dot_join(self.name, name) + ext_args['sources'] = sources + + if 'extra_info' in ext_args: + extra_info = ext_args['extra_info'] + del ext_args['extra_info'] + if isinstance(extra_info, dict): + extra_info = [extra_info] + for info in extra_info: + assert isinstance(info, dict), repr(info) + dict_append(ext_args,**info) + + self._fix_paths_dict(ext_args) + + # Resolve out-of-tree dependencies + libraries = ext_args.get('libraries', []) + libnames = [] + ext_args['libraries'] = [] + for libname in libraries: + if isinstance(libname, tuple): + self._fix_paths_dict(libname[1]) + + # Handle library names of the form libname@relative/path/to/library + if '@' in libname: + lname, lpath = libname.split('@', 1) + lpath = os.path.abspath(njoin(self.local_path, lpath)) + if os.path.isdir(lpath): + c = self.get_subpackage(None, lpath, + caller_level = 2) + if isinstance(c, Configuration): + c = c.todict() + for l in [l[0] for l in c.get('libraries', [])]: + llname = l.split('__OF__', 1)[0] + if llname == lname: + c.pop('name', None) + dict_append(ext_args,**c) + break + continue + libnames.append(libname) + + ext_args['libraries'] = libnames + ext_args['libraries'] + ext_args['define_macros'] = \ + self.define_macros + ext_args.get('define_macros', []) + + from numpy.distutils.core import Extension + ext = Extension(**ext_args) + self.ext_modules.append(ext) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add an extension '+name) + return ext + + def add_library(self,name,sources,**build_info): + """ + Add library to configuration. + + Parameters + ---------- + name : str + Name of the extension. + sources : sequence + List of the sources. The list of sources may contain functions + (called source generators) which must take an extension instance + and a build directory as inputs and return a source file or list of + source files or None. If None is returned then no sources are + generated. If the Extension instance has no sources after + processing all source generators, then no extension module is + built. + build_info : dict, optional + The following keys are allowed: + + * depends + * macros + * include_dirs + * extra_compiler_args + * extra_f77_compile_args + * extra_f90_compile_args + * f2py_options + * language + + """ + self._add_library(name, sources, None, build_info) + + dist = self.get_distribution() + if dist is not None: + self.warn('distutils distribution has been initialized,'\ + ' it may be too late to add a library '+ name) + + def _add_library(self, name, sources, install_dir, build_info): + """Common implementation for add_library and add_installed_library. Do + not use directly""" + build_info = copy.copy(build_info) + build_info['sources'] = sources + + # Sometimes, depends is not set up to an empty list by default, and if + # depends is not given to add_library, distutils barfs (#1134) + if not 'depends' in build_info: + build_info['depends'] = [] + + self._fix_paths_dict(build_info) + + # Add to libraries list so that it is build with build_clib + self.libraries.append((name, build_info)) + + def add_installed_library(self, name, sources, install_dir, build_info=None): + """ + Similar to add_library, but the specified library is installed. 
+ + Most C libraries used with `distutils` are only used to build python + extensions, but libraries built through this method will be installed + so that they can be reused by third-party packages. + + Parameters + ---------- + name : str + Name of the installed library. + sources : sequence + List of the library's source files. See `add_library` for details. + install_dir : str + Path to install the library, relative to the current sub-package. + build_info : dict, optional + The following keys are allowed: + + * depends + * macros + * include_dirs + * extra_compiler_args + * extra_f77_compile_args + * extra_f90_compile_args + * f2py_options + * language + + Returns + ------- + None + + See Also + -------- + add_library, add_npy_pkg_config, get_info + + Notes + ----- + The best way to encode the options required to link against the specified + C libraries is to use a "libname.ini" file, and use `get_info` to + retrieve the required options (see `add_npy_pkg_config` for more + information). + + """ + if not build_info: + build_info = {} + + install_dir = os.path.join(self.package_path, install_dir) + self._add_library(name, sources, install_dir, build_info) + self.installed_libraries.append(InstallableLib(name, build_info, install_dir)) + + def add_npy_pkg_config(self, template, install_dir, subst_dict=None): + """ + Generate and install a npy-pkg config file from a template. + + The config file generated from `template` is installed in the + given install directory, using `subst_dict` for variable substitution. + + Parameters + ---------- + template : str + The path of the template, relative to the current package path. + install_dir : str + Where to install the npy-pkg config file, relative to the current + package path. + subst_dict : dict, optional + If given, any string of the form ``@key@`` will be replaced by + ``subst_dict[key]`` in the template file when installed. The install + prefix is always available through the variable ``@prefix@``, since the + install prefix is not easy to get reliably from setup.py. + + See Also + -------- + add_installed_library, get_info + + Notes + ----- + This works for both standard installs and in-place builds, i.e. the + ``@prefix@`` refers to the source directory for in-place builds. + + Examples + -------- + :: + + config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar}) + + Assuming the foo.ini.in file has the following content:: + + [meta] + Name=@foo@ + Version=1.0 + Description=dummy description + + [default] + Cflags=-I@prefix@/include + Libs= + + The generated file will have the following content:: + + [meta] + Name=bar + Version=1.0 + Description=dummy description + + [default] + Cflags=-Iprefix_dir/include + Libs= + + and will be installed as foo.ini in the 'lib' subpath. + + When cross-compiling with numpy distutils, it might be necessary to + use modified npy-pkg-config files. Using the default/generated files + will link with the host libraries (i.e. libnpymath.a). For + cross-compilation you of course need to link with target libraries, + while using the host Python installation. + + You can copy out the numpy/core/lib/npy-pkg-config directory, add a + pkgdir value to the .ini files and set the NPY_PKG_CONFIG_PATH + environment variable to point to the directory with the modified + npy-pkg-config files.
+ + Example npymath.ini modified for cross-compilation:: + + [meta] + Name=npymath + Description=Portable, core math library implementing C99 standard + Version=0.1 + + [variables] + pkgname=numpy.core + pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core + prefix=${pkgdir} + libdir=${prefix}/lib + includedir=${prefix}/include + + [default] + Libs=-L${libdir} -lnpymath + Cflags=-I${includedir} + Requires=mlib + + [msvc] + Libs=/LIBPATH:${libdir} npymath.lib + Cflags=/INCLUDE:${includedir} + Requires=mlib + + """ + if subst_dict is None: + subst_dict = {} + template = os.path.join(self.package_path, template) + + if self.name in self.installed_pkg_config: + self.installed_pkg_config[self.name].append((template, install_dir, + subst_dict)) + else: + self.installed_pkg_config[self.name] = [(template, install_dir, + subst_dict)] + + + def add_scripts(self,*files): + """Add scripts to configuration. + + Add the sequence of files to the beginning of the scripts list. + Scripts will be installed under the <prefix>/bin/ directory. + + """ + scripts = self.paths(files) + dist = self.get_distribution() + if dist is not None: + if dist.scripts is None: + dist.scripts = [] + dist.scripts.extend(scripts) + else: + self.scripts.extend(scripts) + + def dict_append(self,**dict): + for key in self.list_keys: + a = getattr(self, key) + a.extend(dict.get(key, [])) + for key in self.dict_keys: + a = getattr(self, key) + a.update(dict.get(key, {})) + known_keys = self.list_keys + self.dict_keys + self.extra_keys + for key in dict.keys(): + if key not in known_keys: + a = getattr(self, key, None) + if a and a==dict[key]: continue + self.warn('Inheriting attribute %r=%r from %r' \ + % (key, dict[key], dict.get('name', '?'))) + setattr(self, key, dict[key]) + self.extra_keys.append(key) + elif key in self.extra_keys: + self.info('Ignoring attempt to set %r (from %r to %r)' \ + % (key, getattr(self, key), dict[key])) + elif key in known_keys: + # key is already processed above + pass + else: + raise ValueError("Don't know about key=%r" % (key)) + + def __str__(self): + from pprint import pformat + known_keys = self.list_keys + self.dict_keys + self.extra_keys + s = '<'+5*'-' + '\n' + s += 'Configuration of '+self.name+':\n' + known_keys.sort() + for k in known_keys: + a = getattr(self, k, None) + if a: + s += '%s = %s\n' % (k, pformat(a)) + s += 5*'-' + '>' + return s + + def get_config_cmd(self): + """ + Returns the numpy.distutils config command instance. + """ + cmd = get_cmd('config') + cmd.ensure_finalized() + cmd.dump_source = 0 + cmd.noisy = 0 + old_path = os.environ.get('PATH') + if old_path: + path = os.pathsep.join(['.', old_path]) + os.environ['PATH'] = path + return cmd + + def get_build_temp_dir(self): + """ + Return a path to a temporary directory where temporary files should be + placed. + """ + cmd = get_cmd('build') + cmd.ensure_finalized() + return cmd.build_temp + + def have_f77c(self): + """Check for availability of Fortran 77 compiler. + + Use it inside a source generating function to ensure that the + setup distribution instance has been initialized. + + Notes + ----- + True if a Fortran 77 compiler is available (because a simple Fortran 77 + code was able to be compiled successfully). + """ + simple_fortran_subroutine = ''' + subroutine simple + end + ''' + config_cmd = self.get_config_cmd() + flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77') + return flag + + def have_f90c(self): + """Check for availability of Fortran 90 compiler.
+ + Use it inside a source generating function to ensure that the + setup distribution instance has been initialized. + + Notes + ----- + True if a Fortran 90 compiler is available (because a simple Fortran + 90 code was able to be compiled successfully) + """ + simple_fortran_subroutine = ''' + subroutine simple + end + ''' + config_cmd = self.get_config_cmd() + flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90') + return flag + + def append_to(self, extlib): + """Append libraries, include_dirs to extension or library item. + """ + if is_sequence(extlib): + lib_name, build_info = extlib + dict_append(build_info, + libraries=self.libraries, + include_dirs=self.include_dirs) + else: + from numpy.distutils.core import Extension + assert isinstance(extlib, Extension), repr(extlib) + extlib.libraries.extend(self.libraries) + extlib.include_dirs.extend(self.include_dirs) + + def _get_svn_revision(self, path): + """Return path's SVN revision number. + """ + try: + output = subprocess.check_output(['svnversion'], cwd=path) + except (subprocess.CalledProcessError, OSError): + pass + else: + m = re.match(rb'(?P<revision>\d+)', output) + if m: + return int(m.group('revision')) + + if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None): + entries = njoin(path, '_svn', 'entries') + else: + entries = njoin(path, '.svn', 'entries') + if os.path.isfile(entries): + with open(entries) as f: + fstr = f.read() + if fstr[:5] == '<?xml': # pre 1.4 + m = re.search(r'revision="(?P<revision>\d+)"', fstr) + if m: + return int(m.group('revision')) + else: # non-xml entries file --- check to be sure that + m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr) + if m: + return int(m.group('revision')) + return None + + def _get_hg_revision(self, path): + """Return path's Mercurial revision number. + """ + try: + output = subprocess.check_output( + ['hg', 'identify', '--num'], cwd=path) + except (subprocess.CalledProcessError, OSError): + pass + else: + m = re.match(rb'(?P<revision>\d+)', output) + if m: + return int(m.group('revision')) + + branch_fn = njoin(path, '.hg', 'branch') + branch_cache_fn = njoin(path, '.hg', 'branch.cache') + + if os.path.isfile(branch_fn): + branch0 = None + with open(branch_fn) as f: + revision0 = f.read().strip() + + branch_map = {} + with open(branch_cache_fn) as f: + for line in f: + branch1, revision1 = line.split()[:2] + if revision1==revision0: + branch0 = branch1 + try: + revision1 = int(revision1) + except ValueError: + continue + branch_map[branch1] = revision1 + + return branch_map.get(branch0) + + return None + + + def get_version(self, version_file=None, version_variable=None): + """Try to get version string of a package. + + Return a version string of the current package or None if the version + information could not be detected. + + Notes + ----- + This method scans files named + __version__.py, <packagename>_version.py, version.py, and + __svn_version__.py for string variables version, __version__, and + <packagename>_version, until a version number is found. + """ + version = getattr(self, 'version', None) + if version is not None: + return version + + # Get version from version file.
+        if version_file is None:
+            files = ['__version__.py',
+                     self.name.split('.')[-1]+'_version.py',
+                     'version.py',
+                     '__svn_version__.py',
+                     '__hg_version__.py']
+        else:
+            files = [version_file]
+        if version_variable is None:
+            version_vars = ['version',
+                            '__version__',
+                            self.name.split('.')[-1]+'_version']
+        else:
+            version_vars = [version_variable]
+        for f in files:
+            fn = njoin(self.local_path, f)
+            if os.path.isfile(fn):
+                info = ('.py', 'U', 1)
+                name = os.path.splitext(os.path.basename(fn))[0]
+                n = dot_join(self.name, name)
+                try:
+                    version_module = exec_mod_from_location(
+                                        '_'.join(n.split('.')), fn)
+                except ImportError as e:
+                    self.warn(str(e))
+                    version_module = None
+                if version_module is None:
+                    continue
+
+                for a in version_vars:
+                    version = getattr(version_module, a, None)
+                    if version is not None:
+                        break
+
+                # Try the versioneer module
+                try:
+                    version = version_module.get_versions()['version']
+                except AttributeError:
+                    pass
+
+                if version is not None:
+                    break
+
+        if version is not None:
+            self.version = version
+            return version
+
+        # Get version as SVN or Mercurial revision number
+        revision = self._get_svn_revision(self.local_path)
+        if revision is None:
+            revision = self._get_hg_revision(self.local_path)
+
+        if revision is not None:
+            version = str(revision)
+            self.version = version
+
+        return version
+
+    def make_svn_version_py(self, delete=True):
+        """Appends a data function to the data_files list that will generate
+        a __svn_version__.py file in the current package directory.
+
+        Generate the package __svn_version__.py file from the SVN revision
+        number; it will be removed after python exits but will be available
+        when sdist, etc. commands are executed.
+
+        Notes
+        -----
+        If __svn_version__.py existed before, nothing is done.
+
+        This is intended for working with source directories that are
+        in an SVN repository.
+        """
+        target = njoin(self.local_path, '__svn_version__.py')
+        revision = self._get_svn_revision(self.local_path)
+        if os.path.isfile(target) or revision is None:
+            return
+        else:
+            def generate_svn_version_py():
+                if not os.path.isfile(target):
+                    version = str(revision)
+                    self.info('Creating %s (version=%r)' % (target, version))
+                    with open(target, 'w') as f:
+                        f.write('version = %r\n' % (version))
+
+                def rm_file(f=target, p=self.info):
+                    if delete:
+                        try: os.remove(f); p('removed '+f)
+                        except OSError: pass
+                        try: os.remove(f+'c'); p('removed '+f+'c')
+                        except OSError: pass
+
+                atexit.register(rm_file)
+
+                return target
+
+            self.add_data_files(('', generate_svn_version_py()))
+
+    def make_hg_version_py(self, delete=True):
+        """Appends a data function to the data_files list that will generate
+        a __hg_version__.py file in the current package directory.
+
+        Generate the package __hg_version__.py file from the Mercurial
+        revision; it will be removed after python exits but will be available
+        when sdist, etc. commands are executed.
+
+        Notes
+        -----
+        If __hg_version__.py existed before, nothing is done.
+
+        This is intended for working with source directories that are
+        in a Mercurial repository.
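+
+        The generated file is a one-liner of the form (with an illustrative
+        revision number)::
+
+            version = '42'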
+ """ + target = njoin(self.local_path, '__hg_version__.py') + revision = self._get_hg_revision(self.local_path) + if os.path.isfile(target) or revision is None: + return + else: + def generate_hg_version_py(): + if not os.path.isfile(target): + version = str(revision) + self.info('Creating %s (version=%r)' % (target, version)) + with open(target, 'w') as f: + f.write('version = %r\n' % (version)) + + def rm_file(f=target,p=self.info): + if delete: + try: os.remove(f); p('removed '+f) + except OSError: pass + try: os.remove(f+'c'); p('removed '+f+'c') + except OSError: pass + + atexit.register(rm_file) + + return target + + self.add_data_files(('', generate_hg_version_py())) + + def make_config_py(self,name='__config__'): + """Generate package __config__.py file containing system_info + information used during building the package. + + This file is installed to the + package installation directory. + + """ + self.py_modules.append((self.name, name, generate_config_py)) + + def get_info(self,*names): + """Get resources information. + + Return information (from system_info.get_info) for all of the names in + the argument list in a single dictionary. + """ + from .system_info import get_info, dict_append + info_dict = {} + for a in names: + dict_append(info_dict,**get_info(a)) + return info_dict + + +def get_cmd(cmdname, _cache={}): + if cmdname not in _cache: + import distutils.core + dist = distutils.core._setup_distribution + if dist is None: + from distutils.errors import DistutilsInternalError + raise DistutilsInternalError( + 'setup distribution instance not initialized') + cmd = dist.get_command_obj(cmdname) + _cache[cmdname] = cmd + return _cache[cmdname] + +def get_numpy_include_dirs(): + # numpy_include_dirs are set by numpy/core/setup.py, otherwise [] + include_dirs = Configuration.numpy_include_dirs[:] + if not include_dirs: + import numpy + include_dirs = [ numpy.get_include() ] + # else running numpy/core/setup.py + return include_dirs + +def get_npy_pkg_dir(): + """Return the path where to find the npy-pkg-config directory. + + If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that + is returned. Otherwise, a path inside the location of the numpy module is + returned. + + The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining + customized npy-pkg-config .ini files for the cross-compilation + environment, and using them when cross-compiling. + + """ + d = os.environ.get('NPY_PKG_CONFIG_PATH') + if d is not None: + return d + spec = importlib.util.find_spec('numpy') + d = os.path.join(os.path.dirname(spec.origin), + 'core', 'lib', 'npy-pkg-config') + return d + +def get_pkg_info(pkgname, dirs=None): + """ + Return library info for the given package. + + Parameters + ---------- + pkgname : str + Name of the package (should match the name of the .ini file, without + the extension, e.g. foo for the file foo.ini). + dirs : sequence, optional + If given, should be a sequence of additional directories where to look + for npy-pkg-config files. Those directories are searched prior to the + NumPy directory. + + Returns + ------- + pkginfo : class instance + The `LibraryInfo` instance containing the build information. + + Raises + ------ + PkgNotFound + If the package is not found. 
+
+    See Also
+    --------
+    Configuration.add_npy_pkg_config, Configuration.add_installed_library,
+    get_info
+
+    """
+    from numpy.distutils.npy_pkg_config import read_config
+
+    if dirs:
+        dirs.append(get_npy_pkg_dir())
+    else:
+        dirs = [get_npy_pkg_dir()]
+    return read_config(pkgname, dirs)
+
+def get_info(pkgname, dirs=None):
+    """
+    Return an info dict for a given C library.
+
+    The info dict contains the necessary options to use the C library.
+
+    Parameters
+    ----------
+    pkgname : str
+        Name of the package (should match the name of the .ini file, without
+        the extension, e.g. foo for the file foo.ini).
+    dirs : sequence, optional
+        If given, should be a sequence of additional directories where to look
+        for npy-pkg-config files. Those directories are searched prior to the
+        NumPy directory.
+
+    Returns
+    -------
+    info : dict
+        The dictionary with build information.
+
+    Raises
+    ------
+    PkgNotFound
+        If the package is not found.
+
+    See Also
+    --------
+    Configuration.add_npy_pkg_config, Configuration.add_installed_library,
+    get_pkg_info
+
+    Examples
+    --------
+    To get the necessary information for the npymath library from NumPy:
+
+    >>> npymath_info = np.distutils.misc_util.get_info('npymath')
+    >>> npymath_info #doctest: +SKIP
+    {'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
+    ['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}
+
+    This info dict can then be used as input to a `Configuration` instance::
+
+      config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
+
+    """
+    from numpy.distutils.npy_pkg_config import parse_flags
+    pkg_info = get_pkg_info(pkgname, dirs)
+
+    # Translate LibraryInfo instance into a build_info dict
+    info = parse_flags(pkg_info.cflags())
+    for k, v in parse_flags(pkg_info.libs()).items():
+        info[k].extend(v)
+
+    # add_extension's extra_info argument is strict about the keys it
+    # accepts, so rename 'macros' and drop 'ignored'.
+    info['define_macros'] = info['macros']
+    del info['macros']
+    del info['ignored']
+
+    return info
+
+def is_bootstrapping():
+    import builtins
+
+    try:
+        builtins.__NUMPY_SETUP__
+        return True
+    except AttributeError:
+        return False
+
+
+#########################
+
+def default_config_dict(name = None, parent_name = None, local_path=None):
+    """Return a configuration dictionary for usage in
+    configuration() function defined in file setup_<name>.py.
+ """ + import warnings + warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\ + 'deprecated default_config_dict(%r,%r,%r)' + % (name, parent_name, local_path, + name, parent_name, local_path, + ), stacklevel=2) + c = Configuration(name, parent_name, local_path) + return c.todict() + + +def dict_append(d, **kws): + for k, v in kws.items(): + if k in d: + ov = d[k] + if isinstance(ov, str): + d[k] = v + else: + d[k].extend(v) + else: + d[k] = v + +def appendpath(prefix, path): + if os.path.sep != '/': + prefix = prefix.replace('/', os.path.sep) + path = path.replace('/', os.path.sep) + drive = '' + if os.path.isabs(path): + drive = os.path.splitdrive(prefix)[0] + absprefix = os.path.splitdrive(os.path.abspath(prefix))[1] + pathdrive, path = os.path.splitdrive(path) + d = os.path.commonprefix([absprefix, path]) + if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \ + or os.path.join(path[:len(d)], path[len(d):]) != path: + # Handle invalid paths + d = os.path.dirname(d) + subpath = path[len(d):] + if os.path.isabs(subpath): + subpath = subpath[1:] + else: + subpath = path + return os.path.normpath(njoin(drive + prefix, subpath)) + +def generate_config_py(target): + """Generate config.py file containing system_info information + used during building the package. + + Usage: + config['py_modules'].append((packagename, '__config__',generate_config_py)) + """ + from numpy.distutils.system_info import system_info + from distutils.dir_util import mkpath + mkpath(os.path.dirname(target)) + with open(target, 'w') as f: + f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0]))) + f.write('# It contains system_info results at the time of building this package.\n') + f.write('__all__ = ["get_info","show"]\n\n') + + # For gfortran+msvc combination, extra shared libraries may exist + f.write(textwrap.dedent(""" + import os + import sys + + extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs') + + if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): + os.add_dll_directory(extra_dll_dir) + + """)) + + for k, i in system_info.saved_results.items(): + f.write('%s=%r\n' % (k, i)) + f.write(textwrap.dedent(r''' + def get_info(name): + g = globals() + return g.get(name, g.get(name + "_info", {})) + + def show(): + """ + Show libraries in the system on which NumPy was built. + + Print information about various resources (libraries, library + directories, include directories, etc.) in the system on which + NumPy was built. + + See Also + -------- + get_include : Returns the directory containing NumPy C + header files. + + Notes + ----- + 1. Classes specifying the information to be printed are defined + in the `numpy.distutils.system_info` module. + + Information may include: + + * ``language``: language used to write the libraries (mostly + C or f77) + * ``libraries``: names of libraries found in the system + * ``library_dirs``: directories containing the libraries + * ``include_dirs``: directories containing library header files + * ``src_dirs``: directories containing library source files + * ``define_macros``: preprocessor macros used by + ``distutils.setup`` + * ``baseline``: minimum CPU features required + * ``found``: dispatched features supported in the system + * ``not found``: dispatched features that are not supported + in the system + + 2. 
NumPy BLAS/LAPACK Installation Notes + + Installing a numpy wheel (``pip install numpy`` or force it + via ``pip install numpy --only-binary :numpy: numpy``) includes + an OpenBLAS implementation of the BLAS and LAPACK linear algebra + APIs. In this case, ``library_dirs`` reports the original build + time configuration as compiled with gcc/gfortran; at run time + the OpenBLAS library is in + ``site-packages/numpy.libs/`` (linux), or + ``site-packages/numpy/.dylibs/`` (macOS), or + ``site-packages/numpy/.libs/`` (windows). + + Installing numpy from source + (``pip install numpy --no-binary numpy``) searches for BLAS and + LAPACK dynamic link libraries at build time as influenced by + environment variables NPY_BLAS_LIBS, NPY_CBLAS_LIBS, and + NPY_LAPACK_LIBS; or NPY_BLAS_ORDER and NPY_LAPACK_ORDER; + or the optional file ``~/.numpy-site.cfg``. + NumPy remembers those locations and expects to load the same + libraries at run-time. + In NumPy 1.21+ on macOS, 'accelerate' (Apple's Accelerate BLAS + library) is in the default build-time search order after + 'openblas'. + + Examples + -------- + >>> import numpy as np + >>> np.show_config() + blas_opt_info: + language = c + define_macros = [('HAVE_CBLAS', None)] + libraries = ['openblas', 'openblas'] + library_dirs = ['/usr/local/lib'] + """ + from numpy.core._multiarray_umath import ( + __cpu_features__, __cpu_baseline__, __cpu_dispatch__ + ) + for name,info_dict in globals().items(): + if name[0] == "_" or type(info_dict) is not type({}): continue + print(name + ":") + if not info_dict: + print(" NOT AVAILABLE") + for k,v in info_dict.items(): + v = str(v) + if k == "sources" and len(v) > 200: + v = v[:60] + " ...\n... " + v[-60:] + print(" %s = %s" % (k,v)) + + features_found, features_not_found = [], [] + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + features_found.append(feature) + else: + features_not_found.append(feature) + + print("Supported SIMD extensions in this NumPy install:") + print(" baseline = %s" % (','.join(__cpu_baseline__))) + print(" found = %s" % (','.join(features_found))) + print(" not found = %s" % (','.join(features_not_found))) + + ''')) + + return target + +def msvc_version(compiler): + """Return version major and minor of compiler instance if it is + MSVC, raise an exception otherwise.""" + if not compiler.compiler_type == "msvc": + raise ValueError("Compiler instance is not msvc (%s)"\ + % compiler.compiler_type) + return compiler._MSVCCompiler__version + +def get_build_architecture(): + # Importing distutils.msvccompiler triggers a warning on non-Windows + # systems, so delay the import to here. + from distutils.msvccompiler import get_build_architecture + return get_build_architecture() + + +_cxx_ignore_flags = {'-Werror=implicit-function-declaration', '-std=c99'} + + +def sanitize_cxx_flags(cxxflags): + ''' + Some flags are valid for C but not C++. Prune them. + ''' + return [flag for flag in cxxflags if flag not in _cxx_ignore_flags] + + +def exec_mod_from_location(modname, modfile): + ''' + Use importlib machinery to import a module `modname` from the file + `modfile`. Depending on the `spec.loader`, the module may not be + registered in sys.modules. 
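+
+    A minimal usage sketch (the module name and file path here are
+    hypothetical)::
+
+        mod = exec_mod_from_location('_version_probe', './_version_probe.py')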
+ ''' + spec = importlib.util.spec_from_file_location(modname, modfile) + foo = importlib.util.module_from_spec(spec) + spec.loader.exec_module(foo) + return foo diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/msvc9compiler.py b/.venv/lib/python3.11/site-packages/numpy/distutils/msvc9compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..68239495d6c72b70257e51d7ec3ddb35611940a2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/msvc9compiler.py @@ -0,0 +1,63 @@ +import os +from distutils.msvc9compiler import MSVCCompiler as _MSVCCompiler + +from .system_info import platform_bits + + +def _merge(old, new): + """Concatenate two environment paths avoiding repeats. + + Here `old` is the environment string before the base class initialize + function is called and `new` is the string after the call. The new string + will be a fixed string if it is not obtained from the current environment, + or the same as the old string if obtained from the same environment. The aim + here is not to append the new string if it is already contained in the old + string so as to limit the growth of the environment string. + + Parameters + ---------- + old : string + Previous environment string. + new : string + New environment string. + + Returns + ------- + ret : string + Updated environment string. + + """ + if not old: + return new + if new in old: + return old + + # Neither new nor old is empty. Give old priority. + return ';'.join([old, new]) + + +class MSVCCompiler(_MSVCCompiler): + def __init__(self, verbose=0, dry_run=0, force=0): + _MSVCCompiler.__init__(self, verbose, dry_run, force) + + def initialize(self, plat_name=None): + # The 'lib' and 'include' variables may be overwritten + # by MSVCCompiler.initialize, so save them for later merge. + environ_lib = os.getenv('lib') + environ_include = os.getenv('include') + _MSVCCompiler.initialize(self, plat_name) + + # Merge current and previous values of 'lib' and 'include' + os.environ['lib'] = _merge(environ_lib, os.environ['lib']) + os.environ['include'] = _merge(environ_include, os.environ['include']) + + # msvc9 building for 32 bits requires SSE2 to work around a + # compiler bug. + if platform_bits == 32: + self.compile_options += ['/arch:SSE2'] + self.compile_options_debug += ['/arch:SSE2'] + + def manifest_setup_ldargs(self, output_filename, build_temp, ld_args): + ld_args.append('/MANIFEST') + _MSVCCompiler.manifest_setup_ldargs(self, output_filename, + build_temp, ld_args) diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/msvccompiler.py b/.venv/lib/python3.11/site-packages/numpy/distutils/msvccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..2b93221baac8b122a1cca97278db3748159b780b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/msvccompiler.py @@ -0,0 +1,76 @@ +import os +from distutils.msvccompiler import MSVCCompiler as _MSVCCompiler + +from .system_info import platform_bits + + +def _merge(old, new): + """Concatenate two environment paths avoiding repeats. + + Here `old` is the environment string before the base class initialize + function is called and `new` is the string after the call. The new string + will be a fixed string if it is not obtained from the current environment, + or the same as the old string if obtained from the same environment. The aim + here is not to append the new string if it is already contained in the old + string so as to limit the growth of the environment string. 
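+
+    For example (an illustrative sketch), ``_merge('A;B', 'B')`` returns
+    ``'A;B'`` unchanged, while ``_merge('A;B', 'C')`` returns ``'A;B;C'``.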
+ + Parameters + ---------- + old : string + Previous environment string. + new : string + New environment string. + + Returns + ------- + ret : string + Updated environment string. + + """ + if new in old: + return old + if not old: + return new + + # Neither new nor old is empty. Give old priority. + return ';'.join([old, new]) + + +class MSVCCompiler(_MSVCCompiler): + def __init__(self, verbose=0, dry_run=0, force=0): + _MSVCCompiler.__init__(self, verbose, dry_run, force) + + def initialize(self): + # The 'lib' and 'include' variables may be overwritten + # by MSVCCompiler.initialize, so save them for later merge. + environ_lib = os.getenv('lib', '') + environ_include = os.getenv('include', '') + _MSVCCompiler.initialize(self) + + # Merge current and previous values of 'lib' and 'include' + os.environ['lib'] = _merge(environ_lib, os.environ['lib']) + os.environ['include'] = _merge(environ_include, os.environ['include']) + + # msvc9 building for 32 bits requires SSE2 to work around a + # compiler bug. + if platform_bits == 32: + self.compile_options += ['/arch:SSE2'] + self.compile_options_debug += ['/arch:SSE2'] + + +def lib_opts_if_msvc(build_cmd): + """ Add flags if we are using MSVC compiler + + We can't see `build_cmd` in our scope, because we have not initialized + the distutils build command, so use this deferred calculation to run + when we are building the library. + """ + if build_cmd.compiler.compiler_type != 'msvc': + return [] + # Explicitly disable whole-program optimization. + flags = ['/GL-'] + # Disable voltbl section for vc142 to allow link using mingw-w64; see: + # https://github.com/matthew-brett/dll_investigation/issues/1#issuecomment-1100468171 + if build_cmd.compiler_opt.cc_test_flags(['-d2VolatileMetadata-']): + flags.append('-d2VolatileMetadata-') + return flags diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/npy_pkg_config.py b/.venv/lib/python3.11/site-packages/numpy/distutils/npy_pkg_config.py new file mode 100644 index 0000000000000000000000000000000000000000..f6e3ad3974ca63115e1f8124e743235bb300f1a1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/npy_pkg_config.py @@ -0,0 +1,437 @@ +import sys +import re +import os + +from configparser import RawConfigParser + +__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', + 'read_config', 'parse_flags'] + +_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}') + +class FormatError(OSError): + """ + Exception thrown when there is a problem parsing a configuration file. + + """ + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + +class PkgNotFound(OSError): + """Exception raised when a package can not be located.""" + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + +def parse_flags(line): + """ + Parse a line from a config file containing compile flags. + + Parameters + ---------- + line : str + A single line containing one or more compile flags. + + Returns + ------- + d : dict + Dictionary of parsed flags, split into relevant categories. 
+        These categories are the keys of `d`:
+
+        * 'include_dirs'
+        * 'library_dirs'
+        * 'libraries'
+        * 'macros'
+        * 'ignored'
+
+    """
+    d = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
+         'macros': [], 'ignored': []}
+
+    flags = (' ' + line).split(' -')
+    for flag in flags:
+        flag = '-' + flag
+        if len(flag) > 0:
+            if flag.startswith('-I'):
+                d['include_dirs'].append(flag[2:].strip())
+            elif flag.startswith('-L'):
+                d['library_dirs'].append(flag[2:].strip())
+            elif flag.startswith('-l'):
+                d['libraries'].append(flag[2:].strip())
+            elif flag.startswith('-D'):
+                d['macros'].append(flag[2:].strip())
+            else:
+                d['ignored'].append(flag)
+
+    return d
+
+def _escape_backslash(val):
+    return val.replace('\\', '\\\\')
+
+class LibraryInfo:
+    """
+    Object containing build information about a library.
+
+    Parameters
+    ----------
+    name : str
+        The library name.
+    description : str
+        Description of the library.
+    version : str
+        Version string.
+    sections : dict
+        The sections of the configuration file for the library. The keys are
+        the section headers, the values the text under each header.
+    vars : class instance
+        A `VariableSet` instance, which contains ``(name, value)`` pairs for
+        variables defined in the configuration file for the library.
+    requires : sequence, optional
+        The required libraries for the library to be installed.
+
+    Notes
+    -----
+    All input parameters (except "sections" which is a method) are available as
+    attributes of the same name.
+
+    """
+    def __init__(self, name, description, version, sections, vars, requires=None):
+        self.name = name
+        self.description = description
+        if requires:
+            self.requires = requires
+        else:
+            self.requires = []
+        self.version = version
+        self._sections = sections
+        self.vars = vars
+
+    def sections(self):
+        """
+        Return the section headers of the config file.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        keys : list of str
+            The list of section headers.
+
+        """
+        return list(self._sections.keys())
+
+    def cflags(self, section="default"):
+        val = self.vars.interpolate(self._sections[section]['cflags'])
+        return _escape_backslash(val)
+
+    def libs(self, section="default"):
+        val = self.vars.interpolate(self._sections[section]['libs'])
+        return _escape_backslash(val)
+
+    def __str__(self):
+        m = ['Name: %s' % self.name, 'Description: %s' % self.description]
+        # Only join the requirement list when it is non-empty.
+        if self.requires:
+            m.append('Requires: %s' % ",".join(self.requires))
+        else:
+            m.append('Requires:')
+        m.append('Version: %s' % self.version)
+
+        return "\n".join(m)
+
+class VariableSet:
+    """
+    Container object for the variables defined in a config file.
+
+    `VariableSet` can be used as a plain dictionary, with the variable names
+    as keys.
+
+    Parameters
+    ----------
+    d : dict
+        Dict of items in the "variables" section of the configuration file.
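+
+    Examples
+    --------
+    A small interpolation sketch (with illustrative values)::
+
+        >>> vs = VariableSet({'prefix': '/usr', 'libdir': '${prefix}/lib'})
+        >>> vs.interpolate('-L${libdir}')
+        '-L/usr/lib'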
+
+    """
+    def __init__(self, d):
+        self._raw_data = dict([(k, v) for k, v in d.items()])
+
+        self._re = {}
+        self._re_sub = {}
+
+        self._init_parse()
+
+    def _init_parse(self):
+        for k, v in self._raw_data.items():
+            self._init_parse_var(k, v)
+
+    def _init_parse_var(self, name, value):
+        self._re[name] = re.compile(r'\$\{%s\}' % name)
+        self._re_sub[name] = value
+
+    def interpolate(self, value):
+        # Brute force: we keep interpolating until there is no '${var}' anymore
+        # or until interpolated string is equal to input string
+        def _interpolate(value):
+            for k in self._re.keys():
+                value = self._re[k].sub(self._re_sub[k], value)
+            return value
+        while _VAR.search(value):
+            nvalue = _interpolate(value)
+            if nvalue == value:
+                break
+            value = nvalue
+
+        return value
+
+    def variables(self):
+        """
+        Return the list of variable names.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        names : list of str
+            The names of all variables in the `VariableSet` instance.
+
+        """
+        return list(self._raw_data.keys())
+
+    # Emulate a dict to set/get variables values
+    def __getitem__(self, name):
+        return self._raw_data[name]
+
+    def __setitem__(self, name, value):
+        self._raw_data[name] = value
+        self._init_parse_var(name, value)
+
+def parse_meta(config):
+    if not config.has_section('meta'):
+        raise FormatError("No meta section found!")
+
+    d = dict(config.items('meta'))
+
+    for k in ['name', 'description', 'version']:
+        if k not in d:
+            raise FormatError("Option %s (section [meta]) is mandatory, "
+                "but not found" % k)
+
+    if 'requires' not in d:
+        d['requires'] = []
+
+    return d
+
+def parse_variables(config):
+    if not config.has_section('variables'):
+        raise FormatError("No variables section found!")
+
+    d = {}
+
+    for name, value in config.items("variables"):
+        d[name] = value
+
+    return VariableSet(d)
+
+def parse_sections(config):
+    # Return the meta section together with the remaining "normal" sections
+    # (everything except [meta] and [variables]).
+    meta_d = parse_meta(config)
+    r = {s: dict(config.items(s)) for s in config.sections()
+         if s not in ('meta', 'variables')}
+    return meta_d, r
+
+def pkg_to_filename(pkg_name):
+    return "%s.ini" % pkg_name
+
+def parse_config(filename, dirs=None):
+    if dirs:
+        filenames = [os.path.join(d, filename) for d in dirs]
+    else:
+        filenames = [filename]
+
+    config = RawConfigParser()
+
+    n = config.read(filenames)
+    if not len(n) >= 1:
+        raise PkgNotFound("Could not find file(s) %s" % str(filenames))
+
+    # Parse meta and variables sections
+    meta = parse_meta(config)
+
+    vars = {}
+    if config.has_section('variables'):
+        for name, value in config.items("variables"):
+            vars[name] = _escape_backslash(value)
+
+    # Parse "normal" sections
+    secs = [s for s in config.sections() if s not in ['meta', 'variables']]
+    sections = {}
+
+    requires = {}
+    for s in secs:
+        d = {}
+        if config.has_option(s, "requires"):
+            requires[s] = config.get(s, 'requires')
+
+        for name, value in config.items(s):
+            d[name] = value
+        sections[s] = d
+
+    return meta, vars, sections, requires
+
+def _read_config_imp(filenames, dirs=None):
+    def _read_config(f):
+        meta, vars, sections, reqs = parse_config(f, dirs)
+        # recursively add sections and variables of required libraries
+        for rname, rvalue in reqs.items():
+            nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue))
+
+            # Update var dict for variables not in 'top' config file
+            for k, v in nvars.items():
+                if k not in vars:
+                    vars[k] = v
+
+            # Update sec dict
+            for oname, ovalue in nsections[rname].items():
+                if ovalue:
+                    sections[rname][oname] += ' %s' % ovalue
+
+        return meta, vars, sections, reqs
+
+    meta, vars, sections, reqs = _read_config(filenames)
+
+    # FIXME: document this.
+    # If pkgname is defined in the variables section, and there is no pkgdir
+    # variable defined, pkgdir is automatically defined to the path of
+    # pkgname. This requires the package to be imported to work.
+    if 'pkgdir' not in vars and "pkgname" in vars:
+        pkgname = vars["pkgname"]
+        if pkgname not in sys.modules:
+            raise ValueError("You should import %s to get information on %s" %
+                             (pkgname, meta["name"]))
+
+        mod = sys.modules[pkgname]
+        vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__))
+
+    return LibraryInfo(name=meta["name"], description=meta["description"],
+            version=meta["version"], sections=sections, vars=VariableSet(vars))
+
+# Trivial cache for LibraryInfo instance creation. To be really efficient,
+# the cache should be handled in read_config, since the same file can be
+# parsed many times outside LibraryInfo creation, but I doubt this will be a
+# problem in practice
+_CACHE = {}
+def read_config(pkgname, dirs=None):
+    """
+    Return library info for a package from its configuration file.
+
+    Parameters
+    ----------
+    pkgname : str
+        Name of the package (should match the name of the .ini file, without
+        the extension, e.g. foo for the file foo.ini).
+    dirs : sequence, optional
+        If given, should be a sequence of directories - usually including
+        the NumPy base directory - where to look for npy-pkg-config files.
+
+    Returns
+    -------
+    pkginfo : class instance
+        The `LibraryInfo` instance containing the build information.
+
+    Raises
+    ------
+    PkgNotFound
+        If the package is not found.
+
+    See Also
+    --------
+    misc_util.get_info, misc_util.get_pkg_info
+
+    Examples
+    --------
+    >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
+    >>> type(npymath_info)
+    <class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
+    >>> print(npymath_info)
+    Name: npymath
+    Description: Portable, core math library implementing C99 standard
+    Requires:
+    Version: 0.1 #random
+
+    """
+    try:
+        return _CACHE[pkgname]
+    except KeyError:
+        v = _read_config_imp(pkg_to_filename(pkgname), dirs)
+        _CACHE[pkgname] = v
+        return v
+
+# TODO:
+#   - implement version comparison (modversion + atleast)
+
+# pkg-config simple emulator - useful for debugging, and maybe later to query
+# the system
+if __name__ == '__main__':
+    from optparse import OptionParser
+    import glob
+
+    parser = OptionParser()
+    parser.add_option("--cflags", dest="cflags", action="store_true",
+                      help="output all preprocessor and compiler flags")
+    parser.add_option("--libs", dest="libs", action="store_true",
+                      help="output all linker flags")
+    parser.add_option("--use-section", dest="section",
+                      help="use this section instead of default for options")
+    parser.add_option("--version", dest="version", action="store_true",
+                      help="output version")
+    parser.add_option("--atleast-version", dest="min_version",
+                      help="Minimal version")
+    parser.add_option("--list-all", dest="list_all", action="store_true",
+                      help="List all packages in the current directory")
+    parser.add_option("--define-variable", dest="define_variable",
+                      help="Replace variable with the given value")
+
+    (options, args) = parser.parse_args(sys.argv)
+
+    if len(args) < 2:
+        raise ValueError("Expected a package name on the command line")
+
+    if options.list_all:
+        files = glob.glob("*.ini")
+        for f in files:
+            # read_config expects the bare package name, not the filename
+            info = read_config(os.path.splitext(f)[0])
+            print("%s\t%s - %s" % (info.name, info.name, info.description))
+
+    pkg_name = args[1]
+    d = os.environ.get('NPY_PKG_CONFIG_PATH')
+    if d:
+        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
+    else:
+        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])
+    if options.section:
+        section = options.section
+    else:
+        section = "default"
+
+    if options.define_variable:
+        m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
+        if not m:
+            raise ValueError("--define-variable option should be of "
+                             "the form --define-variable=foo=bar")
+        else:
+            name = m.group(1)
+            value = m.group(2)
+        info.vars[name] = value
+
+    if options.cflags:
+        print(info.cflags(section))
+    if options.libs:
+        print(info.libs(section))
+    if options.version:
+        print(info.version)
+    if options.min_version:
+        print(info.version >= options.min_version)
diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/numpy_distribution.py b/.venv/lib/python3.11/site-packages/numpy/distutils/numpy_distribution.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea8182659cb1af718879de305798b62c23bf3346
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/distutils/numpy_distribution.py
@@ -0,0 +1,17 @@
+# XXX: Handle setuptools ?
+from distutils.core import Distribution
+
+# This class is used because we add new files (sconscripts, and so on) with the
+# scons command
+class NumpyDistribution(Distribution):
+    def __init__(self, attrs = None):
+        # A list of (sconscripts, pre_hook, post_hook, src, parent_names)
+        self.scons_data = []
+        # A list of installable libraries
+        self.installed_libraries = []
+        # A dict of pkg_config files to generate/install
+        self.installed_pkg_config = {}
+        Distribution.__init__(self, attrs)
+
+    def has_scons_scripts(self):
+        return bool(self.scons_data)
diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/pathccompiler.py b/.venv/lib/python3.11/site-packages/numpy/distutils/pathccompiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..48051810ee218fb037cc15ccec05293e5ae9bb6b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/distutils/pathccompiler.py
@@ -0,0 +1,21 @@
+from distutils.unixccompiler import UnixCCompiler
+
+class PathScaleCCompiler(UnixCCompiler):
+
+    """
+    PathScale compiler compatible with a gcc-built Python.
+    """
+
+    compiler_type = 'pathcc'
+    cc_exe = 'pathcc'
+    cxx_exe = 'pathCC'
+
+    def __init__(self, verbose=0, dry_run=0, force=0):
+        UnixCCompiler.__init__(self, verbose, dry_run, force)
+        cc_compiler = self.cc_exe
+        cxx_compiler = self.cxx_exe
+        self.set_executables(compiler=cc_compiler,
+                             compiler_so=cc_compiler,
+                             compiler_cxx=cxx_compiler,
+                             linker_exe=cc_compiler,
+                             linker_so=cc_compiler + ' -shared')
diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/setup.py b/.venv/lib/python3.11/site-packages/numpy/distutils/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..522756fc9db359002c7208b75094b103323f13c6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/distutils/setup.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python3
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('distutils', parent_package, top_path)
+    config.add_subpackage('command')
+    config.add_subpackage('fcompiler')
+    config.add_subpackage('tests')
+    config.add_data_files('site.cfg')
+    config.add_data_files('mingw/gfortran_vs2003_hack.c')
+    config.add_data_dir('checks')
+    config.add_data_files('*.pyi')
+    config.make_config_py()
+    return config
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
+    setup(configuration=configuration)
diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/system_info.py b/.venv/lib/python3.11/site-packages/numpy/distutils/system_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..feb28f61cf070c9dfc0b2fc6f205f477f6a66c8b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/distutils/system_info.py
@@ -0,0 +1,3271 @@
+#!/usr/bin/env python3
+"""
+This file defines a set of system_info classes for getting
+information about various resources (libraries, library directories,
+include directories, etc.) in the system. Usage:
+    info_dict = get_info(<name>)
+  where <name> is a string 'atlas','x11','fftw','lapack','blas',
+  'lapack_src', 'blas_src', etc. For a complete list of allowed names,
+  see the definition of get_info() function below.
+
+  Returned info_dict is a dictionary which is compatible with
+  distutils.setup keyword arguments. If info_dict == {}, then the
+  asked resource is not available (system_info could not find it).
+
+  Several *_info classes specify an environment variable to specify
+  the locations of software. When the corresponding environment
+  variable is set to 'None', the software will be ignored, even when
+  it is available on the system.
+
+Global parameters:
+  system_info.search_static_first - search static libraries (.a)
+        in preference to shared ones (.so, .sl) if enabled.
+  system_info.verbosity - output the results to stdout if enabled.
+
+The file 'site.cfg' is looked for in
+
+1) Directory of main setup.py file being run.
+2) Home directory of user running the setup.py file as ~/.numpy-site.cfg
+3) System wide directory (location of this file...)
+
+The first one found is used to get system configuration options. The
+format is that used by ConfigParser (i.e., Windows .INI style). The
+section ALL is not intended for general use.
+
+Appropriate defaults are used if nothing is specified.
+
+The order of finding the locations of resources is the following:
+ 1. environment variable
+ 2. section in site.cfg
+ 3. DEFAULT section in site.cfg
+ 4. System default search paths (see ``default_*`` variables below).
+Only the first complete match is returned.
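+
+A minimal lookup sketch (illustrative)::
+
+    from numpy.distutils.system_info import get_info
+    info = get_info('lapack_opt')   # info == {} if LAPACK was not found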
+
+Currently, the following classes are available, along with their section names:
+
+    Numeric_info:Numeric
+    _numpy_info:Numeric
+    _pkg_config_info:None
+    accelerate_info:accelerate
+    accelerate_lapack_info:accelerate
+    agg2_info:agg2
+    amd_info:amd
+    atlas_3_10_blas_info:atlas
+    atlas_3_10_blas_threads_info:atlas
+    atlas_3_10_info:atlas
+    atlas_3_10_threads_info:atlas
+    atlas_blas_info:atlas
+    atlas_blas_threads_info:atlas
+    atlas_info:atlas
+    atlas_threads_info:atlas
+    blas64__opt_info:ALL             # usage recommended (general ILP64 BLAS, 64_ symbol suffix)
+    blas_ilp64_opt_info:ALL          # usage recommended (general ILP64 BLAS)
+    blas_ilp64_plain_opt_info:ALL    # usage recommended (general ILP64 BLAS, no symbol suffix)
+    blas_info:blas
+    blas_mkl_info:mkl
+    blas_ssl2_info:ssl2
+    blas_opt_info:ALL                # usage recommended
+    blas_src_info:blas_src
+    blis_info:blis
+    boost_python_info:boost_python
+    dfftw_info:fftw
+    dfftw_threads_info:fftw
+    djbfft_info:djbfft
+    f2py_info:ALL
+    fft_opt_info:ALL
+    fftw2_info:fftw
+    fftw3_info:fftw3
+    fftw_info:fftw
+    fftw_threads_info:fftw
+    flame_info:flame
+    freetype2_info:freetype2
+    gdk_2_info:gdk_2
+    gdk_info:gdk
+    gdk_pixbuf_2_info:gdk_pixbuf_2
+    gdk_pixbuf_xlib_2_info:gdk_pixbuf_xlib_2
+    gdk_x11_2_info:gdk_x11_2
+    gtkp_2_info:gtkp_2
+    gtkp_x11_2_info:gtkp_x11_2
+    lapack64__opt_info:ALL           # usage recommended (general ILP64 LAPACK, 64_ symbol suffix)
+    lapack_atlas_3_10_info:atlas
+    lapack_atlas_3_10_threads_info:atlas
+    lapack_atlas_info:atlas
+    lapack_atlas_threads_info:atlas
+    lapack_ilp64_opt_info:ALL        # usage recommended (general ILP64 LAPACK)
+    lapack_ilp64_plain_opt_info:ALL  # usage recommended (general ILP64 LAPACK, no symbol suffix)
+    lapack_info:lapack
+    lapack_mkl_info:mkl
+    lapack_ssl2_info:ssl2
+    lapack_opt_info:ALL              # usage recommended
+    lapack_src_info:lapack_src
+    mkl_info:mkl
+    ssl2_info:ssl2
+    numarray_info:numarray
+    numerix_info:numerix
+    numpy_info:numpy
+    openblas64__info:openblas64_
+    openblas64__lapack_info:openblas64_
+    openblas_clapack_info:openblas
+    openblas_ilp64_info:openblas_ilp64
+    openblas_ilp64_lapack_info:openblas_ilp64
+    openblas_info:openblas
+    openblas_lapack_info:openblas
+    sfftw_info:fftw
+    sfftw_threads_info:fftw
+    system_info:ALL
+    umfpack_info:umfpack
+    wx_info:wx
+    x11_info:x11
+    xft_info:xft
+
+Note that blas_opt_info and lapack_opt_info honor the NPY_BLAS_ORDER
+and NPY_LAPACK_ORDER environment variables to determine the order in which
+specific BLAS and LAPACK libraries are searched for.
+
+This search (or autodetection) can be bypassed by defining the environment
+variables NPY_BLAS_LIBS and NPY_LAPACK_LIBS, which should then contain the
+exact linker flags to use (language will be set to F77). This is useful when
+building against Netlib BLAS/LAPACK or stub files, in order to be able to
+switch BLAS and LAPACK implementations at runtime. If using this to build
+NumPy itself, it is recommended to also define NPY_CBLAS_LIBS (assuming your
+BLAS library has a CBLAS interface) to enable CBLAS usage for matrix
+multiplication (unoptimized otherwise).
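+
+For instance (illustrative paths and flags), a build against plain Netlib
+libraries could be driven by::
+
+    NPY_BLAS_LIBS="-L/opt/netlib -lblas" \
+    NPY_CBLAS_LIBS="-lcblas" \
+    NPY_LAPACK_LIBS="-L/opt/netlib -llapack" \
+    python setup.py build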
+
+Example:
+----------
+[DEFAULT]
+# default section
+library_dirs = /usr/lib:/usr/local/lib:/opt/lib
+include_dirs = /usr/include:/usr/local/include:/opt/include
+src_dirs = /usr/local/src:/opt/src
+# search static libraries (.a) in preference to shared ones (.so)
+search_static_first = 0
+
+[fftw]
+libraries = rfftw, fftw
+
+[atlas]
+library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
+# for overriding the names of the atlas libraries
+libraries = lapack, f77blas, cblas, atlas
+
+[x11]
+library_dirs = /usr/X11R6/lib
+include_dirs = /usr/X11R6/include
+----------
+
+Note that the ``libraries`` key is the default setting for libraries.
+
+Authors:
+  Pearu Peterson <pearu@cens.ioc.ee>, February 2002
+  David M. Cooke <cookedm@physics.mcmaster.ca>, April 2002
+
+Copyright 2002 Pearu Peterson all rights reserved,
+Pearu Peterson <pearu@cens.ioc.ee>
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy (BSD style) license. See LICENSE.txt that came with
+this distribution for specifics.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+
+"""
+import sys
+import os
+import re
+import copy
+import warnings
+import subprocess
+import textwrap
+
+from glob import glob
+from functools import reduce
+from configparser import NoOptionError
+from configparser import RawConfigParser as ConfigParser
+# It seems that some people are importing ConfigParser from here, so it is
+# good to keep its class name. Use of RawConfigParser is needed in
+# order to be able to load path names with percent in them, like
+# `feature%2Fcool` which is common on git flow branch names.
+
+from distutils.errors import DistutilsError
+from distutils.dist import Distribution
+import sysconfig
+from numpy.distutils import log
+from distutils.util import get_platform
+
+from numpy.distutils.exec_command import (
+    find_executable, filepath_from_subprocess_output,
+    )
+from numpy.distutils.misc_util import (is_sequence, is_string,
+                                       get_shared_lib_extension)
+from numpy.distutils.command.config import config as cmd_config
+from numpy.distutils import customized_ccompiler as _customized_ccompiler
+from numpy.distutils import _shell_utils
+import distutils.ccompiler
+import tempfile
+import shutil
+
+__all__ = ['system_info']
+
+# Determine number of bits
+import platform
+_bits = {'32bit': 32, '64bit': 64}
+platform_bits = _bits[platform.architecture()[0]]
+
+
+global_compiler = None
+
+def customized_ccompiler():
+    global global_compiler
+    if not global_compiler:
+        global_compiler = _customized_ccompiler()
+    return global_compiler
+
+
+def _c_string_literal(s):
+    """
+    Convert a python string into a literal suitable for inclusion into C code
+    """
+    # only these three characters are forbidden in C strings
+    s = s.replace('\\', r'\\')
+    s = s.replace('"',  r'\"')
+    s = s.replace('\n', r'\n')
+    return '"{}"'.format(s)
+
+
+def libpaths(paths, bits):
+    """Return a list of library paths valid on 32 or 64 bit systems.
+
+    Inputs:
+      paths : sequence
+        A sequence of strings (typically paths)
+      bits : int
+        An integer, the only valid values are 32 or 64. A ValueError exception
+        is raised otherwise.
+ + Examples: + + Consider a list of directories + >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] + + For a 32-bit platform, this is already valid: + >>> np.distutils.system_info.libpaths(paths,32) + ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] + + On 64 bits, we prepend the '64' postfix + >>> np.distutils.system_info.libpaths(paths,64) + ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', + '/usr/lib64', '/usr/lib'] + """ + if bits not in (32, 64): + raise ValueError("Invalid bit size in libpaths: 32 or 64 only") + + # Handle 32bit case + if bits == 32: + return paths + + # Handle 64bit case + out = [] + for p in paths: + out.extend([p + '64', p]) + + return out + + +if sys.platform == 'win32': + default_lib_dirs = ['C:\\', + os.path.join(sysconfig.get_config_var('exec_prefix'), + 'libs')] + default_runtime_dirs = [] + default_include_dirs = [] + default_src_dirs = ['.'] + default_x11_lib_dirs = [] + default_x11_include_dirs = [] + _include_dirs = [ + 'include', + 'include/suitesparse', + ] + _lib_dirs = [ + 'lib', + ] + + _include_dirs = [d.replace('/', os.sep) for d in _include_dirs] + _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs] + def add_system_root(library_root): + """Add a package manager root to the include directories""" + global default_lib_dirs + global default_include_dirs + + library_root = os.path.normpath(library_root) + + default_lib_dirs.extend( + os.path.join(library_root, d) for d in _lib_dirs) + default_include_dirs.extend( + os.path.join(library_root, d) for d in _include_dirs) + + # VCpkg is the de-facto package manager on windows for C/C++ + # libraries. If it is on the PATH, then we append its paths here. + vcpkg = shutil.which('vcpkg') + if vcpkg: + vcpkg_dir = os.path.dirname(vcpkg) + if platform.architecture()[0] == '32bit': + specifier = 'x86' + else: + specifier = 'x64' + + vcpkg_installed = os.path.join(vcpkg_dir, 'installed') + for vcpkg_root in [ + os.path.join(vcpkg_installed, specifier + '-windows'), + os.path.join(vcpkg_installed, specifier + '-windows-static'), + ]: + add_system_root(vcpkg_root) + + # Conda is another popular package manager that provides libraries + conda = shutil.which('conda') + if conda: + conda_dir = os.path.dirname(conda) + add_system_root(os.path.join(conda_dir, '..', 'Library')) + add_system_root(os.path.join(conda_dir, 'Library')) + +else: + default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', + '/opt/local/lib', '/sw/lib'], platform_bits) + default_runtime_dirs = [] + default_include_dirs = ['/usr/local/include', + '/opt/include', + # path of umfpack under macports + '/opt/local/include/ufsparse', + '/opt/local/include', '/sw/include', + '/usr/include/suitesparse'] + default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src'] + + default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib', + '/usr/lib'], platform_bits) + default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include'] + + if os.path.exists('/usr/lib/X11'): + globbed_x11_dir = glob('/usr/lib/*/libX11.so') + if globbed_x11_dir: + x11_so_dir = os.path.split(globbed_x11_dir[0])[0] + default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11']) + default_x11_include_dirs.extend(['/usr/lib/X11/include', + '/usr/include/X11']) + + with open(os.devnull, 'w') as tmp: + try: + p = subprocess.Popen(["gcc", "-print-multiarch"], stdout=subprocess.PIPE, + stderr=tmp) + except (OSError, DistutilsError): + # OSError if gcc is not installed, or SandboxViolation (DistutilsError + # subclass) if an old 
setuptools bug is triggered (see gh-3160).
+            pass
+        else:
+            triplet = str(p.communicate()[0].decode().strip())
+            if p.returncode == 0:
+                # gcc supports the "-print-multiarch" option
+                default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)]
+                default_lib_dirs += [os.path.join("/usr/lib/", triplet)]
+
+
+if os.path.join(sys.prefix, 'lib') not in default_lib_dirs:
+    default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib'))
+    default_include_dirs.append(os.path.join(sys.prefix, 'include'))
+    default_src_dirs.append(os.path.join(sys.prefix, 'src'))
+
+default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)]
+default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)]
+default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)]
+default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)]
+
+so_ext = get_shared_lib_extension()
+
+
+def get_standard_file(fname):
+    """Returns a list of files named 'fname' from
+    1) System-wide directory (directory-location of this module)
+    2) User's HOME directory (os.environ['HOME'])
+    3) Local directory
+    """
+    # System-wide file
+    filenames = []
+    try:
+        f = __file__
+    except NameError:
+        f = sys.argv[0]
+    sysfile = os.path.join(os.path.split(os.path.abspath(f))[0],
+                           fname)
+    if os.path.isfile(sysfile):
+        filenames.append(sysfile)
+
+    # Home directory
+    # And look for the user config file
+    try:
+        f = os.path.expanduser('~')
+    except KeyError:
+        pass
+    else:
+        user_file = os.path.join(f, fname)
+        if os.path.isfile(user_file):
+            filenames.append(user_file)
+
+    # Local file
+    if os.path.isfile(fname):
+        filenames.append(os.path.abspath(fname))
+
+    return filenames
+
+
+def _parse_env_order(base_order, env):
+    """ Parse an environment variable `env` by splitting with "," and only returning elements from `base_order`
+
+    This method splits the environment variable into its elements and checks
+    each of them against `base_order`.
+
+    The items in the environment variable may be negated via '^item' or
+    '!itema,itemb'. The value must start with '^'/'!' to negate any options.
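+
+    For example (illustrative), with ``base_order = ['a', 'b', 'c']`` an
+    environment value of ``"^b"`` yields ``['a', 'c']``, while ``"b,a"``
+    yields ``['b', 'a']``.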
+
+    Raises
+    ------
+    ValueError: for mixed negated and non-negated orders or multiple negated orders
+
+    Parameters
+    ----------
+    base_order : list of str
+       the base list of orders
+    env : str
+       the environment variable to be parsed; if it is not set, `base_order`
+       is returned
+
+    Returns
+    -------
+    allow_order : list of str
+        allowed orders in lower-case
+    unknown_order : list of str
+        for values not overlapping with `base_order`
+    """
+    order_str = os.environ.get(env, None)
+
+    # ensure all base-orders are lower-case (for easier comparison)
+    base_order = [order.lower() for order in base_order]
+    if order_str is None:
+        return base_order, []
+
+    neg = order_str.startswith('^') or order_str.startswith('!')
+    # Check format
+    order_str_l = list(order_str)
+    sum_neg = order_str_l.count('^') + order_str_l.count('!')
+    if neg:
+        if sum_neg > 1:
+            raise ValueError(f"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}")
+        # remove prefix
+        order_str = order_str[1:]
+    elif sum_neg > 0:
+        raise ValueError(f"Environment variable '{env}' may not mix negated and non-negated items: {order_str}")
+
+    # Split and lower case
+    orders = order_str.lower().split(',')
+
+    # to inform callee about non-overlapping elements
+    unknown_order = []
+
+    # if negated, we have to remove from the order
+    if neg:
+        allow_order = base_order.copy()
+
+        for order in orders:
+            if not order:
+                continue
+
+            if order not in base_order:
+                unknown_order.append(order)
+                continue
+
+            if order in allow_order:
+                allow_order.remove(order)
+
+    else:
+        allow_order = []
+
+        for order in orders:
+            if not order:
+                continue
+
+            if order not in base_order:
+                unknown_order.append(order)
+                continue
+
+            if order not in allow_order:
+                allow_order.append(order)
+
+    return allow_order, unknown_order
+
+
+def get_info(name, notfound_action=0):
+    """
+    notfound_action:
+      0 - do nothing
+      1 - display warning message
+      2 - raise error
+    """
+    cl = {'armpl': armpl_info,
+          'blas_armpl': blas_armpl_info,
+          'lapack_armpl': lapack_armpl_info,
+          'fftw3_armpl': fftw3_armpl_info,
+          'atlas': atlas_info,  # use lapack_opt or blas_opt instead
+          'atlas_threads': atlas_threads_info,  # ditto
+          'atlas_blas': atlas_blas_info,
+          'atlas_blas_threads': atlas_blas_threads_info,
+          'lapack_atlas': lapack_atlas_info,  # use lapack_opt instead
+          'lapack_atlas_threads': lapack_atlas_threads_info,  # ditto
+          'atlas_3_10': atlas_3_10_info,  # use lapack_opt or blas_opt instead
+          'atlas_3_10_threads': atlas_3_10_threads_info,  # ditto
+          'atlas_3_10_blas': atlas_3_10_blas_info,
+          'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
+          'lapack_atlas_3_10': lapack_atlas_3_10_info,  # use lapack_opt instead
+          'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info,  # ditto
+          'flame': flame_info,  # use lapack_opt instead
+          'mkl': mkl_info,
+          'ssl2': ssl2_info,
+          # openblas which may or may not have embedded lapack
+          'openblas': openblas_info,  # use blas_opt instead
+          # openblas with embedded lapack
+          'openblas_lapack': openblas_lapack_info,  # use blas_opt instead
+          'openblas_clapack': openblas_clapack_info,  # use blas_opt instead
+          'blis': blis_info,  # use blas_opt instead
+          'lapack_mkl': lapack_mkl_info,  # use lapack_opt instead
+          'blas_mkl': blas_mkl_info,  # use blas_opt instead
+          'lapack_ssl2': lapack_ssl2_info,
+          'blas_ssl2': blas_ssl2_info,
+          'accelerate': accelerate_info,  # use blas_opt instead
+          'accelerate_lapack': accelerate_lapack_info,
+          'openblas64_': openblas64__info,
+          'openblas64__lapack': openblas64__lapack_info,
+          'openblas_ilp64': openblas_ilp64_info,
+          'openblas_ilp64_lapack': openblas_ilp64_lapack_info,
+          'x11': x11_info,
+          'fft_opt': fft_opt_info,
+          'fftw': fftw_info,
+          'fftw2': fftw2_info,
+          'fftw3': fftw3_info,
+          'dfftw': dfftw_info,
+          'sfftw': sfftw_info,
+          'fftw_threads': fftw_threads_info,
+          'dfftw_threads': dfftw_threads_info,
+          'sfftw_threads': sfftw_threads_info,
+          'djbfft': djbfft_info,
+          'blas': blas_info,  # use blas_opt instead
+          'lapack': lapack_info,  # use lapack_opt instead
+          'lapack_src': lapack_src_info,
+          'blas_src': blas_src_info,
+          'numpy': numpy_info,
+          'f2py': f2py_info,
+          'Numeric': Numeric_info,
+          'numeric': Numeric_info,
+          'numarray': numarray_info,
+          'numerix': numerix_info,
+          'lapack_opt': lapack_opt_info,
+          'lapack_ilp64_opt': lapack_ilp64_opt_info,
+          'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info,
+          'lapack64__opt': lapack64__opt_info,
+          'blas_opt': blas_opt_info,
+          'blas_ilp64_opt': blas_ilp64_opt_info,
+          'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info,
+          'blas64__opt': blas64__opt_info,
+          'boost_python': boost_python_info,
+          'agg2': agg2_info,
+          'wx': wx_info,
+          'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info,
+          'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info,
+          'gdk_pixbuf_2': gdk_pixbuf_2_info,
+          'gdk-pixbuf-2.0': gdk_pixbuf_2_info,
+          'gdk': gdk_info,
+          'gdk_2': gdk_2_info,
+          'gdk-2.0': gdk_2_info,
+          'gdk_x11_2': gdk_x11_2_info,
+          'gdk-x11-2.0': gdk_x11_2_info,
+          'gtkp_x11_2': gtkp_x11_2_info,
+          'gtk+-x11-2.0': gtkp_x11_2_info,
+          'gtkp_2': gtkp_2_info,
+          'gtk+-2.0': gtkp_2_info,
+          'xft': xft_info,
+          'freetype2': freetype2_info,
+          'umfpack': umfpack_info,
+          'amd': amd_info,
+          }.get(name.lower(), system_info)
+    return cl().get_info(notfound_action)
+
+
+class NotFoundError(DistutilsError):
+    """Some third-party program or library is not found."""
+
+
+class AliasedOptionError(DistutilsError):
+    """
+    Aliased options in a config file must not appear together.
+    In section '{section}' we found multiple appearances of options {options}."""
+
+
+class AtlasNotFoundError(NotFoundError):
+    """
+    Atlas (http://github.com/math-atlas/math-atlas) libraries not found.
+    Directories to search for the libraries can be specified in the
+    numpy/distutils/site.cfg file (section [atlas]) or by setting
+    the ATLAS environment variable."""
+
+
+class FlameNotFoundError(NotFoundError):
+    """
+    FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found.
+    Directories to search for the libraries can be specified in the
+    numpy/distutils/site.cfg file (section [flame])."""
+
+
+class LapackNotFoundError(NotFoundError):
+    """
+    Lapack (http://www.netlib.org/lapack/) libraries not found.
+    Directories to search for the libraries can be specified in the
+    numpy/distutils/site.cfg file (section [lapack]) or by setting
+    the LAPACK environment variable."""
+
+
+class LapackSrcNotFoundError(LapackNotFoundError):
+    """
+    Lapack (http://www.netlib.org/lapack/) sources not found.
+    Directories to search for the sources can be specified in the
+    numpy/distutils/site.cfg file (section [lapack_src]) or by setting
+    the LAPACK_SRC environment variable."""
+
+
+class LapackILP64NotFoundError(NotFoundError):
+    """
+    64-bit Lapack libraries not found.
+    Known libraries in numpy/distutils/site.cfg file are:
+    openblas64_, openblas_ilp64
+    """
+
+class BlasOptNotFoundError(NotFoundError):
+    """
+    Optimized (vendor) Blas libraries are not found.
+    Falls back to netlib Blas library which has worse performance.
+ A better performance should be easily gained by switching + Blas library.""" + +class BlasNotFoundError(NotFoundError): + """ + Blas (http://www.netlib.org/blas/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [blas]) or by setting + the BLAS environment variable.""" + +class BlasILP64NotFoundError(NotFoundError): + """ + 64-bit Blas libraries not found. + Known libraries in numpy/distutils/site.cfg file are: + openblas64_, openblas_ilp64 + """ + +class BlasSrcNotFoundError(BlasNotFoundError): + """ + Blas (http://www.netlib.org/blas/) sources not found. + Directories to search for the sources can be specified in the + numpy/distutils/site.cfg file (section [blas_src]) or by setting + the BLAS_SRC environment variable.""" + + +class FFTWNotFoundError(NotFoundError): + """ + FFTW (http://www.fftw.org/) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [fftw]) or by setting + the FFTW environment variable.""" + + +class DJBFFTNotFoundError(NotFoundError): + """ + DJBFFT (https://cr.yp.to/djbfft.html) libraries not found. + Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [djbfft]) or by setting + the DJBFFT environment variable.""" + + +class NumericNotFoundError(NotFoundError): + """ + Numeric (https://www.numpy.org/) module not found. + Get it from above location, install it, and retry setup.py.""" + + +class X11NotFoundError(NotFoundError): + """X11 libraries not found.""" + + +class UmfpackNotFoundError(NotFoundError): + """ + UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/) + not found. Directories to search for the libraries can be specified in the + numpy/distutils/site.cfg file (section [umfpack]) or by setting + the UMFPACK environment variable.""" + + +class system_info: + + """ get_info() is the only public method. Don't use others. + """ + dir_env_var = None + # XXX: search_static_first is disabled by default, may disappear in + # future unless it is proved to be useful. + search_static_first = 0 + # The base-class section name is a random word "ALL" and is not really + # intended for general use. It cannot be None nor can it be DEFAULT as + # these break the ConfigParser. 
See gh-15338 + section = 'ALL' + saved_results = {} + + notfounderror = NotFoundError + + def __init__(self, + default_lib_dirs=default_lib_dirs, + default_include_dirs=default_include_dirs, + ): + self.__class__.info = {} + self.local_prefixes = [] + defaults = {'library_dirs': os.pathsep.join(default_lib_dirs), + 'include_dirs': os.pathsep.join(default_include_dirs), + 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs), + 'rpath': '', + 'src_dirs': os.pathsep.join(default_src_dirs), + 'search_static_first': str(self.search_static_first), + 'extra_compile_args': '', 'extra_link_args': ''} + self.cp = ConfigParser(defaults) + self.files = [] + self.files.extend(get_standard_file('.numpy-site.cfg')) + self.files.extend(get_standard_file('site.cfg')) + self.parse_config_files() + + if self.section is not None: + self.search_static_first = self.cp.getboolean( + self.section, 'search_static_first') + assert isinstance(self.search_static_first, int) + + def parse_config_files(self): + self.cp.read(self.files) + if not self.cp.has_section(self.section): + if self.section is not None: + self.cp.add_section(self.section) + + def calc_libraries_info(self): + libs = self.get_libraries() + dirs = self.get_lib_dirs() + # The extensions use runtime_library_dirs + r_dirs = self.get_runtime_lib_dirs() + # Intrinsic distutils use rpath, we simply append both entries + # as though they were one entry + r_dirs.extend(self.get_runtime_lib_dirs(key='rpath')) + info = {} + for lib in libs: + i = self.check_libs(dirs, [lib]) + if i is not None: + dict_append(info, **i) + else: + log.info('Library %s was not found. Ignoring' % (lib)) + + if r_dirs: + i = self.check_libs(r_dirs, [lib]) + if i is not None: + # Swap library keywords found to runtime_library_dirs + # the libraries are insisting on the user having defined + # them using the library_dirs, and not necessarily by + # runtime_library_dirs + del i['libraries'] + i['runtime_library_dirs'] = i.pop('library_dirs') + dict_append(info, **i) + else: + log.info('Runtime library %s was not found. 
Ignoring' % (lib)) + + return info + + def set_info(self, **info): + if info: + lib_info = self.calc_libraries_info() + dict_append(info, **lib_info) + # Update extra information + extra_info = self.calc_extra_info() + dict_append(info, **extra_info) + self.saved_results[self.__class__.__name__] = info + + def get_option_single(self, *options): + """ Ensure that only one of `options` are found in the section + + Parameters + ---------- + *options : list of str + a list of options to be found in the section (``self.section``) + + Returns + ------- + str : + the option that is uniquely found in the section + + Raises + ------ + AliasedOptionError : + in case more than one of the options are found + """ + found = [self.cp.has_option(self.section, opt) for opt in options] + if sum(found) == 1: + return options[found.index(True)] + elif sum(found) == 0: + # nothing is found anyways + return options[0] + + # Else we have more than 1 key found + if AliasedOptionError.__doc__ is None: + raise AliasedOptionError() + raise AliasedOptionError(AliasedOptionError.__doc__.format( + section=self.section, options='[{}]'.format(', '.join(options)))) + + + def has_info(self): + return self.__class__.__name__ in self.saved_results + + def calc_extra_info(self): + """ Updates the information in the current information with + respect to these flags: + extra_compile_args + extra_link_args + """ + info = {} + for key in ['extra_compile_args', 'extra_link_args']: + # Get values + opt = self.cp.get(self.section, key) + opt = _shell_utils.NativeParser.split(opt) + if opt: + tmp = {key: opt} + dict_append(info, **tmp) + return info + + def get_info(self, notfound_action=0): + """ Return a dictionary with items that are compatible + with numpy.distutils.setup keyword arguments. + """ + flag = 0 + if not self.has_info(): + flag = 1 + log.info(self.__class__.__name__ + ':') + if hasattr(self, 'calc_info'): + self.calc_info() + if notfound_action: + if not self.has_info(): + if notfound_action == 1: + warnings.warn(self.notfounderror.__doc__, stacklevel=2) + elif notfound_action == 2: + raise self.notfounderror(self.notfounderror.__doc__) + else: + raise ValueError(repr(notfound_action)) + + if not self.has_info(): + log.info(' NOT AVAILABLE') + self.set_info() + else: + log.info(' FOUND:') + + res = self.saved_results.get(self.__class__.__name__) + if log.get_threshold() <= log.INFO and flag: + for k, v in res.items(): + v = str(v) + if k in ['sources', 'libraries'] and len(v) > 270: + v = v[:120] + '...\n...\n...' 
+ v[-120:] + log.info(' %s = %s', k, v) + log.info('') + + return copy.deepcopy(res) + + def get_paths(self, section, key): + dirs = self.cp.get(section, key).split(os.pathsep) + env_var = self.dir_env_var + if env_var: + if is_sequence(env_var): + e0 = env_var[-1] + for e in env_var: + if e in os.environ: + e0 = e + break + if not env_var[0] == e0: + log.info('Setting %s=%s' % (env_var[0], e0)) + env_var = e0 + if env_var and env_var in os.environ: + d = os.environ[env_var] + if d == 'None': + log.info('Disabled %s: %s', + self.__class__.__name__, '(%s is None)' + % (env_var,)) + return [] + if os.path.isfile(d): + dirs = [os.path.dirname(d)] + dirs + l = getattr(self, '_lib_names', []) + if len(l) == 1: + b = os.path.basename(d) + b = os.path.splitext(b)[0] + if b[:3] == 'lib': + log.info('Replacing _lib_names[0]==%r with %r' \ + % (self._lib_names[0], b[3:])) + self._lib_names[0] = b[3:] + else: + ds = d.split(os.pathsep) + ds2 = [] + for d in ds: + if os.path.isdir(d): + ds2.append(d) + for dd in ['include', 'lib']: + d1 = os.path.join(d, dd) + if os.path.isdir(d1): + ds2.append(d1) + dirs = ds2 + dirs + default_dirs = self.cp.get(self.section, key).split(os.pathsep) + dirs.extend(default_dirs) + ret = [] + for d in dirs: + if len(d) > 0 and not os.path.isdir(d): + warnings.warn('Specified path %s is invalid.' % d, stacklevel=2) + continue + + if d not in ret: + ret.append(d) + + log.debug('( %s = %s )', key, ':'.join(ret)) + return ret + + def get_lib_dirs(self, key='library_dirs'): + return self.get_paths(self.section, key) + + def get_runtime_lib_dirs(self, key='runtime_library_dirs'): + path = self.get_paths(self.section, key) + if path == ['']: + path = [] + return path + + def get_include_dirs(self, key='include_dirs'): + return self.get_paths(self.section, key) + + def get_src_dirs(self, key='src_dirs'): + return self.get_paths(self.section, key) + + def get_libs(self, key, default): + try: + libs = self.cp.get(self.section, key) + except NoOptionError: + if not default: + return [] + if is_string(default): + return [default] + return default + return [b for b in [a.strip() for a in libs.split(',')] if b] + + def get_libraries(self, key='libraries'): + if hasattr(self, '_lib_names'): + return self.get_libs(key, default=self._lib_names) + else: + return self.get_libs(key, '') + + def library_extensions(self): + c = customized_ccompiler() + static_exts = [] + if c.compiler_type != 'msvc': + # MSVC doesn't understand binutils + static_exts.append('.a') + if sys.platform == 'win32': + static_exts.append('.lib') # .lib is used by MSVC and others + if self.search_static_first: + exts = static_exts + [so_ext] + else: + exts = [so_ext] + static_exts + if sys.platform == 'cygwin': + exts.append('.dll.a') + if sys.platform == 'darwin': + exts.append('.dylib') + return exts + + def check_libs(self, lib_dirs, libs, opt_libs=[]): + """If static or shared libraries are available then return + their info dictionary. + + Checks for all libraries as shared libraries first, then + static (or vice versa if self.search_static_first is True). + """ + exts = self.library_extensions() + info = None + for ext in exts: + info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) + if info is not None: + break + if not info: + log.info(' libraries %s not found in %s', ','.join(libs), + lib_dirs) + return info + + def check_libs2(self, lib_dirs, libs, opt_libs=[]): + """If static or shared libraries are available then return + their info dictionary. + + Checks each library for shared or static. 
+ """ + exts = self.library_extensions() + info = self._check_libs(lib_dirs, libs, opt_libs, exts) + if not info: + log.info(' libraries %s not found in %s', ','.join(libs), + lib_dirs) + + return info + + def _find_lib(self, lib_dir, lib, exts): + assert is_string(lib_dir) + # under windows first try without 'lib' prefix + if sys.platform == 'win32': + lib_prefixes = ['', 'lib'] + else: + lib_prefixes = ['lib'] + # for each library name, see if we can find a file for it. + for ext in exts: + for prefix in lib_prefixes: + p = self.combine_paths(lib_dir, prefix + lib + ext) + if p: + break + if p: + assert len(p) == 1 + # ??? splitext on p[0] would do this for cygwin + # doesn't seem correct + if ext == '.dll.a': + lib += '.dll' + if ext == '.lib': + lib = prefix + lib + return lib + + return False + + def _find_libs(self, lib_dirs, libs, exts): + # make sure we preserve the order of libs, as it can be important + found_dirs, found_libs = [], [] + for lib in libs: + for lib_dir in lib_dirs: + found_lib = self._find_lib(lib_dir, lib, exts) + if found_lib: + found_libs.append(found_lib) + if lib_dir not in found_dirs: + found_dirs.append(lib_dir) + break + return found_dirs, found_libs + + def _check_libs(self, lib_dirs, libs, opt_libs, exts): + """Find mandatory and optional libs in expected paths. + + Missing optional libraries are silently forgotten. + """ + if not is_sequence(lib_dirs): + lib_dirs = [lib_dirs] + # First, try to find the mandatory libraries + found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts) + if len(found_libs) > 0 and len(found_libs) == len(libs): + # Now, check for optional libraries + opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts) + found_libs.extend(opt_found_libs) + for lib_dir in opt_found_dirs: + if lib_dir not in found_dirs: + found_dirs.append(lib_dir) + info = {'libraries': found_libs, 'library_dirs': found_dirs} + return info + else: + return None + + def combine_paths(self, *args): + """Return a list of existing paths composed by all combinations + of items from the arguments. 
+ """ + return combine_paths(*args) + + +class fft_opt_info(system_info): + + def calc_info(self): + info = {} + fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw') + djbfft_info = get_info('djbfft') + if fftw_info: + dict_append(info, **fftw_info) + if djbfft_info: + dict_append(info, **djbfft_info) + self.set_info(**info) + return + + +class fftw_info(system_info): + #variables to override + section = 'fftw' + dir_env_var = 'FFTW' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw3', + 'libs':['fftw3'], + 'includes':['fftw3.h'], + 'macros':[('SCIPY_FFTW3_H', None)]}, + {'name':'fftw2', + 'libs':['rfftw', 'fftw'], + 'includes':['fftw.h', 'rfftw.h'], + 'macros':[('SCIPY_FFTW_H', None)]}] + + def calc_ver_info(self, ver_param): + """Returns True on successful version detection, else False""" + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + + opt = self.get_option_single(self.section + '_libs', 'libraries') + libs = self.get_libs(opt, ver_param['libs']) + info = self.check_libs(lib_dirs, libs) + if info is not None: + flag = 0 + for d in incl_dirs: + if len(self.combine_paths(d, ver_param['includes'])) \ + == len(ver_param['includes']): + dict_append(info, include_dirs=[d]) + flag = 1 + break + if flag: + dict_append(info, define_macros=ver_param['macros']) + else: + info = None + if info is not None: + self.set_info(**info) + return True + else: + log.info(' %s not found' % (ver_param['name'])) + return False + + def calc_info(self): + for i in self.ver_info: + if self.calc_ver_info(i): + break + + +class fftw2_info(fftw_info): + #variables to override + section = 'fftw' + dir_env_var = 'FFTW' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw2', + 'libs':['rfftw', 'fftw'], + 'includes':['fftw.h', 'rfftw.h'], + 'macros':[('SCIPY_FFTW_H', None)]} + ] + + +class fftw3_info(fftw_info): + #variables to override + section = 'fftw3' + dir_env_var = 'FFTW3' + notfounderror = FFTWNotFoundError + ver_info = [{'name':'fftw3', + 'libs':['fftw3'], + 'includes':['fftw3.h'], + 'macros':[('SCIPY_FFTW3_H', None)]}, + ] + + +class fftw3_armpl_info(fftw_info): + section = 'fftw3' + dir_env_var = 'ARMPL_DIR' + notfounderror = FFTWNotFoundError + ver_info = [{'name': 'fftw3', + 'libs': ['armpl_lp64_mp'], + 'includes': ['fftw3.h'], + 'macros': [('SCIPY_FFTW3_H', None)]}] + + +class dfftw_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'dfftw', + 'libs':['drfftw', 'dfftw'], + 'includes':['dfftw.h', 'drfftw.h'], + 'macros':[('SCIPY_DFFTW_H', None)]}] + + +class sfftw_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'sfftw', + 'libs':['srfftw', 'sfftw'], + 'includes':['sfftw.h', 'srfftw.h'], + 'macros':[('SCIPY_SFFTW_H', None)]}] + + +class fftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'fftw threads', + 'libs':['rfftw_threads', 'fftw_threads'], + 'includes':['fftw_threads.h', 'rfftw_threads.h'], + 'macros':[('SCIPY_FFTW_THREADS_H', None)]}] + + +class dfftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'dfftw threads', + 'libs':['drfftw_threads', 'dfftw_threads'], + 'includes':['dfftw_threads.h', 'drfftw_threads.h'], + 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}] + + +class sfftw_threads_info(fftw_info): + section = 'fftw' + dir_env_var = 'FFTW' + ver_info = [{'name':'sfftw threads', + 'libs':['srfftw_threads', 'sfftw_threads'], + 'includes':['sfftw_threads.h', 'srfftw_threads.h'], + 
'macros':[('SCIPY_SFFTW_THREADS_H', None)]}] + + +class djbfft_info(system_info): + section = 'djbfft' + dir_env_var = 'DJBFFT' + notfounderror = DJBFFTNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend(self.combine_paths(d, ['djbfft']) + [d]) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + info = None + for d in lib_dirs: + p = self.combine_paths(d, ['djbfft.a']) + if p: + info = {'extra_objects': p} + break + p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext]) + if p: + info = {'libraries': ['djbfft'], 'library_dirs': [d]} + break + if info is None: + return + for d in incl_dirs: + if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2: + dict_append(info, include_dirs=[d], + define_macros=[('SCIPY_DJBFFT_H', None)]) + self.set_info(**info) + return + return + + +class mkl_info(system_info): + section = 'mkl' + dir_env_var = 'MKLROOT' + _lib_mkl = ['mkl_rt'] + + def get_mkl_rootdir(self): + mklroot = os.environ.get('MKLROOT', None) + if mklroot is not None: + return mklroot + paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) + ld_so_conf = '/etc/ld.so.conf' + if os.path.isfile(ld_so_conf): + with open(ld_so_conf) as f: + for d in f: + d = d.strip() + if d: + paths.append(d) + intel_mkl_dirs = [] + for path in paths: + path_atoms = path.split(os.sep) + for m in path_atoms: + if m.startswith('mkl'): + d = os.sep.join(path_atoms[:path_atoms.index(m) + 2]) + intel_mkl_dirs.append(d) + break + for d in paths: + dirs = glob(os.path.join(d, 'mkl', '*')) + dirs += glob(os.path.join(d, 'mkl*')) + for sub_dir in dirs: + if os.path.isdir(os.path.join(sub_dir, 'lib')): + return sub_dir + return None + + def __init__(self): + mklroot = self.get_mkl_rootdir() + if mklroot is None: + system_info.__init__(self) + else: + from .cpuinfo import cpu + if cpu.is_Itanium(): + plt = '64' + elif cpu.is_Intel() and cpu.is_64bit(): + plt = 'intel64' + else: + plt = '32' + system_info.__init__( + self, + default_lib_dirs=[os.path.join(mklroot, 'lib', plt)], + default_include_dirs=[os.path.join(mklroot, 'include')]) + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + opt = self.get_option_single('mkl_libs', 'libraries') + mkl_libs = self.get_libs(opt, self._lib_mkl) + info = self.check_libs2(lib_dirs, mkl_libs) + if info is None: + return + dict_append(info, + define_macros=[('SCIPY_MKL_H', None), + ('HAVE_CBLAS', None)], + include_dirs=incl_dirs) + if sys.platform == 'win32': + pass # win32 has no pthread library + else: + dict_append(info, libraries=['pthread']) + self.set_info(**info) + + +class lapack_mkl_info(mkl_info): + pass + + +class blas_mkl_info(mkl_info): + pass + + +class ssl2_info(system_info): + section = 'ssl2' + dir_env_var = 'SSL2_DIR' + # Multi-threaded version. Python itself must be built by Fujitsu compiler. 
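+ # (An illustrative override, not upstream guidance: the single-threaded
+ #  flavour can be selected without editing this default via site.cfg,
+ #  e.g. an [ssl2] section containing ``ssl2_libs = fjlapacksve``, which
+ #  calc_info() below reads through get_libs('ssl2_libs', ...).)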
+ _lib_ssl2 = ['fjlapackexsve'] + # Single-threaded version + #_lib_ssl2 = ['fjlapacksve'] + + def get_tcsds_rootdir(self): + tcsdsroot = os.environ.get('TCSDS_PATH', None) + if tcsdsroot is not None: + return tcsdsroot + return None + + def __init__(self): + tcsdsroot = self.get_tcsds_rootdir() + if tcsdsroot is None: + system_info.__init__(self) + else: + system_info.__init__( + self, + default_lib_dirs=[os.path.join(tcsdsroot, 'lib64')], + default_include_dirs=[os.path.join(tcsdsroot, + 'clang-comp/include')]) + + def calc_info(self): + tcsdsroot = self.get_tcsds_rootdir() + + lib_dirs = self.get_lib_dirs() + if lib_dirs is None: + lib_dirs = os.path.join(tcsdsroot, 'lib64') + + incl_dirs = self.get_include_dirs() + if incl_dirs is None: + incl_dirs = os.path.join(tcsdsroot, 'clang-comp/include') + + ssl2_libs = self.get_libs('ssl2_libs', self._lib_ssl2) + + info = self.check_libs2(lib_dirs, ssl2_libs) + if info is None: + return + dict_append(info, + define_macros=[('HAVE_CBLAS', None), + ('HAVE_SSL2', 1)], + include_dirs=incl_dirs,) + self.set_info(**info) + + +class lapack_ssl2_info(ssl2_info): + pass + + +class blas_ssl2_info(ssl2_info): + pass + + + +class armpl_info(system_info): + section = 'armpl' + dir_env_var = 'ARMPL_DIR' + _lib_armpl = ['armpl_lp64_mp'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + incl_dirs = self.get_include_dirs() + armpl_libs = self.get_libs('armpl_libs', self._lib_armpl) + info = self.check_libs2(lib_dirs, armpl_libs) + if info is None: + return + dict_append(info, + define_macros=[('SCIPY_MKL_H', None), + ('HAVE_CBLAS', None)], + include_dirs=incl_dirs) + self.set_info(**info) + +class lapack_armpl_info(armpl_info): + pass + +class blas_armpl_info(armpl_info): + pass + + +class atlas_info(system_info): + section = 'atlas' + dir_env_var = 'ATLAS' + _lib_names = ['f77blas', 'cblas'] + if sys.platform[:7] == 'freebsd': + _lib_atlas = ['atlas_r'] + _lib_lapack = ['alapack_r'] + else: + _lib_atlas = ['atlas'] + _lib_lapack = ['lapack'] + + notfounderror = AtlasNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*', + 'sse', '3dnow', 'sse2']) + [d]) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + opt = self.get_option_single('atlas_libs', 'libraries') + atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) + lapack_libs = self.get_libs('lapack_libs', self._lib_lapack) + atlas = None + lapack = None + atlas_1 = None + for d in lib_dirs: + atlas = self.check_libs2(d, atlas_libs, []) + if atlas is not None: + lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*']) + lapack = self.check_libs2(lib_dirs2, lapack_libs, []) + if lapack is not None: + break + if atlas: + atlas_1 = atlas + log.info(self.__class__) + if atlas is None: + atlas = atlas_1 + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + if lapack is not None: + dict_append(info, **lapack) + dict_append(info, **atlas) + elif 'lapack_atlas' in atlas['libraries']: + dict_append(info, **atlas) + dict_append(info, + define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)]) + self.set_info(**info) + return + else: + dict_append(info, **atlas) + dict_append(info, 
define_macros=[('ATLAS_WITHOUT_LAPACK', None)]) + message = textwrap.dedent(""" + ********************************************************************* + Could not find lapack library within the ATLAS installation. + ********************************************************************* + """) + warnings.warn(message, stacklevel=2) + self.set_info(**info) + return + + # Check if lapack library is complete, only warn if it is not. + lapack_dir = lapack['library_dirs'][0] + lapack_name = lapack['libraries'][0] + lapack_lib = None + lib_prefixes = ['lib'] + if sys.platform == 'win32': + lib_prefixes.append('') + for e in self.library_extensions(): + for prefix in lib_prefixes: + fn = os.path.join(lapack_dir, prefix + lapack_name + e) + if os.path.exists(fn): + lapack_lib = fn + break + if lapack_lib: + break + if lapack_lib is not None: + sz = os.stat(lapack_lib)[6] + if sz <= 4000 * 1024: + message = textwrap.dedent(""" + ********************************************************************* + Lapack library (from ATLAS) is probably incomplete: + size of %s is %sk (expected >4000k) + + Follow the instructions in the KNOWN PROBLEMS section of the file + numpy/INSTALL.txt. + ********************************************************************* + """) % (lapack_lib, sz / 1024) + warnings.warn(message, stacklevel=2) + else: + info['language'] = 'f77' + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + dict_append(info, **atlas_extra_info) + + self.set_info(**info) + + +class atlas_blas_info(atlas_info): + _lib_names = ['f77blas', 'cblas'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + opt = self.get_option_single('atlas_libs', 'libraries') + atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas) + atlas = self.check_libs2(lib_dirs, atlas_libs, []) + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + info['define_macros'] = [('HAVE_CBLAS', None)] + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + dict_append(atlas, **atlas_extra_info) + + dict_append(info, **atlas) + + self.set_info(**info) + return + + +class atlas_threads_info(atlas_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['ptf77blas', 'ptcblas'] + + +class atlas_blas_threads_info(atlas_blas_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['ptf77blas', 'ptcblas'] + + +class lapack_atlas_info(atlas_info): + _lib_names = ['lapack_atlas'] + atlas_info._lib_names + + +class lapack_atlas_threads_info(atlas_threads_info): + _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names + + +class atlas_3_10_info(atlas_info): + _lib_names = ['satlas'] + _lib_atlas = _lib_names + _lib_lapack = _lib_names + + +class atlas_3_10_blas_info(atlas_3_10_info): + _lib_names = ['satlas'] + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + info = {} + opt = self.get_option_single('atlas_lib', 'libraries') + atlas_libs = self.get_libs(opt, self._lib_names) + atlas = self.check_libs2(lib_dirs, atlas_libs, []) + if atlas is None: + return + include_dirs = self.get_include_dirs() + h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None]) + h = h[0] + if h: + h = os.path.dirname(h) + dict_append(info, include_dirs=[h]) + info['language'] = 'c' + info['define_macros'] = [('HAVE_CBLAS', None)] + + atlas_version, atlas_extra_info = get_atlas_version(**atlas) + 
dict_append(atlas, **atlas_extra_info) + + dict_append(info, **atlas) + + self.set_info(**info) + return + + +class atlas_3_10_threads_info(atlas_3_10_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['tatlas'] + _lib_atlas = _lib_names + _lib_lapack = _lib_names + + +class atlas_3_10_blas_threads_info(atlas_3_10_blas_info): + dir_env_var = ['PTATLAS', 'ATLAS'] + _lib_names = ['tatlas'] + + +class lapack_atlas_3_10_info(atlas_3_10_info): + pass + + +class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info): + pass + + +class lapack_info(system_info): + section = 'lapack' + dir_env_var = 'LAPACK' + _lib_names = ['lapack'] + notfounderror = LapackNotFoundError + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + + opt = self.get_option_single('lapack_libs', 'libraries') + lapack_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs(lib_dirs, lapack_libs, []) + if info is None: + return + info['language'] = 'f77' + self.set_info(**info) + + +class lapack_src_info(system_info): + # LAPACK_SRC is deprecated, please do not use this! + # Build or install a BLAS library via your package manager or from + # source separately. + section = 'lapack_src' + dir_env_var = 'LAPACK_SRC' + notfounderror = LapackSrcNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'dgesv.f')): + src_dir = d + break + if not src_dir: + #XXX: Get sources from netlib. May be ask first. + return + # The following is extracted from LAPACK-3.0/SRC/Makefile. + # Added missing names from lapack-lite-3.1.1/SRC/Makefile + # while keeping removed names for Lapack-3.0 compatibility. 
+ allaux = ''' + ilaenv ieeeck lsame lsamen xerbla + iparmq + ''' # *.f + laux = ''' + bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1 + laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2 + lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre + larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4 + lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1 + lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf + stebz stedc steqr sterf + + larra larrc larrd larr larrk larrj larrr laneg laisnan isnan + lazq3 lazq4 + ''' # [s|d]*.f + lasrc = ''' + gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak + gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv + gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2 + geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd + gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal + gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd + ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein + hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0 + lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb + lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp + laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv + lartv larz larzb larzt laswp lasyf latbs latdf latps latrd + latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv + pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2 + potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri + pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs + spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv + sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2 + tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs + trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs + tzrqf tzrzf + + lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5 + ''' # [s|c|d|z]*.f + sd_lasrc = ''' + laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l + org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr + orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3 + ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx + sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd + stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd + sygvx sytd2 sytrd + ''' # [s|d]*.f + cz_lasrc = ''' + bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev + heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv + hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd + hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf + hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7 + laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe + laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv + spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq + ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2 + unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr + ''' # [c|z]*.f + ####### + sclaux = laux + ' econd ' # s*.f + dzlaux = laux + ' secnd ' # d*.f + slasrc = lasrc + sd_lasrc # s*.f + dlasrc = lasrc + sd_lasrc # d*.f + clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f + zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f + oclasrc = ' icmax1 scsum1 ' # *.f + ozlasrc = ' izmax1 dzsum1 ' # *.f + sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \ + + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \ + + ['c%s.f' % f for f in (clasrc).split()] \ + + ['z%s.f' % f for f in (zlasrc).split()] \ + + ['%s.f' % f for f 
in (allaux + oclasrc + ozlasrc).split()] + sources = [os.path.join(src_dir, f) for f in sources] + # Lapack 3.1: + src_dir2 = os.path.join(src_dir, '..', 'INSTALL') + sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz'] + # Lapack 3.2.1: + sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz'] + sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz'] + sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz'] + # Should we check here actual existence of source files? + # Yes, the file listing is different between 3.0 and 3.1 + # versions. + sources = [f for f in sources if os.path.isfile(f)] + info = {'sources': sources, 'language': 'f77'} + self.set_info(**info) + +atlas_version_c_text = r''' +/* This file is generated from numpy/distutils/system_info.py */ +void ATL_buildinfo(void); +int main(void) { + ATL_buildinfo(); + return 0; +} +''' + +_cached_atlas_version = {} + + +def get_atlas_version(**config): + libraries = config.get('libraries', []) + library_dirs = config.get('library_dirs', []) + key = (tuple(libraries), tuple(library_dirs)) + if key in _cached_atlas_version: + return _cached_atlas_version[key] + c = cmd_config(Distribution()) + atlas_version = None + info = {} + try: + s, o = c.get_output(atlas_version_c_text, + libraries=libraries, library_dirs=library_dirs, + ) + if s and re.search(r'undefined reference to `_gfortran', o, re.M): + s, o = c.get_output(atlas_version_c_text, + libraries=libraries + ['gfortran'], + library_dirs=library_dirs, + ) + if not s: + warnings.warn(textwrap.dedent(""" + ***************************************************** + Linkage with ATLAS requires gfortran. Use + + python setup.py config_fc --fcompiler=gnu95 ... + + when building extension libraries that use ATLAS. + Make sure that -lgfortran is used for C++ extensions. 
+ ***************************************************** + """), stacklevel=2) + dict_append(info, language='f90', + define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)]) + except Exception: # failed to get version from file -- maybe on Windows + # look at directory name + for o in library_dirs: + m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o) + if m: + atlas_version = m.group('version') + if atlas_version is not None: + break + + # final choice --- look at ATLAS_VERSION environment + # variable + if atlas_version is None: + atlas_version = os.environ.get('ATLAS_VERSION', None) + if atlas_version: + dict_append(info, define_macros=[( + 'ATLAS_INFO', _c_string_literal(atlas_version)) + ]) + else: + dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)]) + return atlas_version or '?.?.?', info + + if not s: + m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o) + if m: + atlas_version = m.group('version') + if atlas_version is None: + if re.search(r'undefined symbol: ATL_buildinfo', o, re.M): + atlas_version = '3.2.1_pre3.3.6' + else: + log.info('Status: %d', s) + log.info('Output: %s', o) + + elif atlas_version == '3.2.1_pre3.3.6': + dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)]) + else: + dict_append(info, define_macros=[( + 'ATLAS_INFO', _c_string_literal(atlas_version)) + ]) + result = _cached_atlas_version[key] = atlas_version, info + return result + + + class lapack_opt_info(system_info): + notfounderror = LapackNotFoundError + + # List of all known LAPACK libraries, in the default order + lapack_order = ['armpl', 'mkl', 'ssl2', 'openblas', 'flame', + 'accelerate', 'atlas', 'lapack'] + order_env_var_name = 'NPY_LAPACK_ORDER' + + def _calc_info_armpl(self): + info = get_info('lapack_armpl') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_mkl(self): + info = get_info('lapack_mkl') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_ssl2(self): + info = get_info('lapack_ssl2') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_openblas(self): + info = get_info('openblas_lapack') + if info: + self.set_info(**info) + return True + info = get_info('openblas_clapack') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_flame(self): + info = get_info('flame') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_atlas(self): + info = get_info('atlas_3_10_threads') + if not info: + info = get_info('atlas_3_10') + if not info: + info = get_info('atlas_threads') + if not info: + info = get_info('atlas') + if info: + # Figure out if ATLAS has lapack... + # If not we need the lapack library, but not BLAS! + l = info.get('define_macros', []) + if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \ + or ('ATLAS_WITHOUT_LAPACK', None) in l: + # Get LAPACK (with possible warnings) + # If not found we don't accept anything + # since we can't use ATLAS with LAPACK!
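+ # (ATLAS_WITH_LAPACK_ATLAS and ATLAS_WITHOUT_LAPACK are the macros that
+ #  atlas_info.calc_info() attaches when the detected ATLAS lacks a full
+ #  LAPACK, so their presence is what triggers this fallback path.)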
+ lapack_info = self._get_info_lapack() + if not lapack_info: + return False + dict_append(info, **lapack_info) + self.set_info(**info) + return True + return False + + def _calc_info_accelerate(self): + info = get_info('accelerate') + if info: + self.set_info(**info) + return True + return False + + def _get_info_blas(self): + # Default to get the optimized BLAS implementation + info = get_info('blas_opt') + if not info: + warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) + info_src = get_info('blas_src') + if not info_src: + warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) + return {} + dict_append(info, libraries=[('fblas_src', info_src)]) + return info + + def _get_info_lapack(self): + info = get_info('lapack') + if not info: + warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3) + info_src = get_info('lapack_src') + if not info_src: + warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3) + return {} + dict_append(info, libraries=[('flapack_src', info_src)]) + return info + + def _calc_info_lapack(self): + info = self._get_info_lapack() + if info: + info_blas = self._get_info_blas() + dict_append(info, **info_blas) + dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) + self.set_info(**info) + return True + return False + + def _calc_info_from_envvar(self): + info = {} + info['language'] = 'f77' + info['libraries'] = [] + info['include_dirs'] = [] + info['define_macros'] = [] + info['extra_link_args'] = os.environ['NPY_LAPACK_LIBS'].split() + self.set_info(**info) + return True + + def _calc_info(self, name): + return getattr(self, '_calc_info_{}'.format(name))() + + def calc_info(self): + lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name) + if len(unknown_order) > 0: + raise ValueError("lapack_opt_info user defined " + "LAPACK order has unacceptable " + "values: {}".format(unknown_order)) + + if 'NPY_LAPACK_LIBS' in os.environ: + # Bypass autodetection, set language to F77 and use env var linker + # flags directly + self._calc_info_from_envvar() + return + + for lapack in lapack_order: + if self._calc_info(lapack): + return + + if 'lapack' not in lapack_order: + # Since the user may request *not* to use any library, we still need + # to raise warnings to signal missing packages! 
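+ # (This is the case where e.g. NPY_LAPACK_ORDER=mkl was set: 'lapack' then
+ #  never enters the search order, so the warnings below are the only signal
+ #  that nothing was found -- an illustrative reading of the check above;
+ #  see _parse_env_order for the exact syntax accepted.)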
+ warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2) + warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2) + + +class _ilp64_opt_info_mixin: + symbol_suffix = None + symbol_prefix = None + + def _check_info(self, info): + macros = dict(info.get('define_macros', [])) + prefix = macros.get('BLAS_SYMBOL_PREFIX', '') + suffix = macros.get('BLAS_SYMBOL_SUFFIX', '') + + if self.symbol_prefix not in (None, prefix): + return False + + if self.symbol_suffix not in (None, suffix): + return False + + return bool(info) + + +class lapack_ilp64_opt_info(lapack_opt_info, _ilp64_opt_info_mixin): + notfounderror = LapackILP64NotFoundError + lapack_order = ['openblas64_', 'openblas_ilp64', 'accelerate'] + order_env_var_name = 'NPY_LAPACK_ILP64_ORDER' + + def _calc_info(self, name): + print('lapack_ilp64_opt_info._calc_info(name=%s)' % (name)) + info = get_info(name + '_lapack') + if self._check_info(info): + self.set_info(**info) + return True + else: + print('%s_lapack does not exist' % (name)) + return False + + +class lapack_ilp64_plain_opt_info(lapack_ilp64_opt_info): + # Same as lapack_ilp64_opt_info, but fix symbol names + symbol_prefix = '' + symbol_suffix = '' + + +class lapack64__opt_info(lapack_ilp64_opt_info): + symbol_prefix = '' + symbol_suffix = '64_' + + +class blas_opt_info(system_info): + notfounderror = BlasNotFoundError + # List of all known BLAS libraries, in the default order + + blas_order = ['armpl', 'mkl', 'ssl2', 'blis', 'openblas', + 'accelerate', 'atlas', 'blas'] + order_env_var_name = 'NPY_BLAS_ORDER' + + def _calc_info_armpl(self): + info = get_info('blas_armpl') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_mkl(self): + info = get_info('blas_mkl') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_ssl2(self): + info = get_info('blas_ssl2') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_blis(self): + info = get_info('blis') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_openblas(self): + info = get_info('openblas') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_atlas(self): + info = get_info('atlas_3_10_blas_threads') + if not info: + info = get_info('atlas_3_10_blas') + if not info: + info = get_info('atlas_blas_threads') + if not info: + info = get_info('atlas_blas') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_accelerate(self): + info = get_info('accelerate') + if info: + self.set_info(**info) + return True + return False + + def _calc_info_blas(self): + # Warn about a non-optimized BLAS library + warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3) + info = {} + dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)]) + + blas = get_info('blas') + if blas: + dict_append(info, **blas) + else: + # Not even BLAS was found! 
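+ # (Note: the NotFoundError subclasses' docstrings double as the warning
+ #  text emitted below, which is why they are written as user-facing prose.)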
+ warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3) + + blas_src = get_info('blas_src') + if not blas_src: + warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3) + return False + dict_append(info, libraries=[('fblas_src', blas_src)]) + + self.set_info(**info) + return True + + def _calc_info_from_envvar(self): + info = {} + info['language'] = 'f77' + info['libraries'] = [] + info['include_dirs'] = [] + info['define_macros'] = [] + info['extra_link_args'] = os.environ['NPY_BLAS_LIBS'].split() + if 'NPY_CBLAS_LIBS' in os.environ: + info['define_macros'].append(('HAVE_CBLAS', None)) + info['extra_link_args'].extend( + os.environ['NPY_CBLAS_LIBS'].split()) + self.set_info(**info) + return True + + def _calc_info(self, name): + return getattr(self, '_calc_info_{}'.format(name))() + + def calc_info(self): + blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name) + if len(unknown_order) > 0: + raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(unknown_order)) + + if 'NPY_BLAS_LIBS' in os.environ: + # Bypass autodetection, set language to F77 and use env var linker + # flags directly + self._calc_info_from_envvar() + return + + for blas in blas_order: + if self._calc_info(blas): + return + + if 'blas' not in blas_order: + # Since the user may request *not* to use any library, we still need + # to raise warnings to signal missing packages! + warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2) + warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2) + + +class blas_ilp64_opt_info(blas_opt_info, _ilp64_opt_info_mixin): + notfounderror = BlasILP64NotFoundError + blas_order = ['openblas64_', 'openblas_ilp64', 'accelerate'] + order_env_var_name = 'NPY_BLAS_ILP64_ORDER' + + def _calc_info(self, name): + info = get_info(name) + if self._check_info(info): + self.set_info(**info) + return True + return False + + +class blas_ilp64_plain_opt_info(blas_ilp64_opt_info): + symbol_prefix = '' + symbol_suffix = '' + + +class blas64__opt_info(blas_ilp64_opt_info): + symbol_prefix = '' + symbol_suffix = '64_' + + +class cblas_info(system_info): + section = 'cblas' + dir_env_var = 'CBLAS' + # No default as it's used only in blas_info + _lib_names = [] + notfounderror = BlasNotFoundError + + +class blas_info(system_info): + section = 'blas' + dir_env_var = 'BLAS' + _lib_names = ['blas'] + notfounderror = BlasNotFoundError + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + opt = self.get_option_single('blas_libs', 'libraries') + blas_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs(lib_dirs, blas_libs, []) + if info is None: + return + else: + info['include_dirs'] = self.get_include_dirs() + if platform.system() == 'Windows': + # The check for windows is needed because get_cblas_libs uses the + # same compiler that was used to compile Python and msvc is + # often not installed when mingw is being used. This rough + # treatment is not desirable, but windows is tricky. + info['language'] = 'f77' # XXX: is it generally true? 
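+ # A hedged site.cfg sketch for the branch handled next: a standalone CBLAS
+ # can be declared through the [cblas] section read by cblas_info, e.g.
+ #
+ #     [cblas]
+ #     cblas_libs = cblas
+ #
+ # (section and option names are the ones defined above; the library name is
+ # only an example.)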
+ # If cblas is given as an option, use those + cblas_info_obj = cblas_info() + cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries') + cblas_libs = cblas_info_obj.get_libs(cblas_opt, None) + if cblas_libs: + info['libraries'] = cblas_libs + blas_libs + info['define_macros'] = [('HAVE_CBLAS', None)] + else: + lib = self.get_cblas_libs(info) + if lib is not None: + info['language'] = 'c' + info['libraries'] = lib + info['define_macros'] = [('HAVE_CBLAS', None)] + self.set_info(**info) + + def get_cblas_libs(self, info): + """ Check whether we can link with CBLAS interface + + This method will search through several combinations of libraries + to check whether CBLAS is present: + + 1. Libraries in ``info['libraries']``, as is + 2. As 1. but also explicitly adding ``'cblas'`` as a library + 3. As 1. but also explicitly adding ``'blas'`` as a library + 4. Check only library ``'cblas'`` + 5. Check only library ``'blas'`` + + Parameters + ---------- + info : dict + system information dictionary for compilation and linking + + Returns + ------- + libraries : list of str or None + a list of libraries that enables the use of CBLAS interface. + Returns None if not found or a compilation error occurs. + + Since 1.17 returns a list. + """ + # primitive cblas check by looking for the header and trying to link + # cblas or blas + c = customized_ccompiler() + tmpdir = tempfile.mkdtemp() + s = textwrap.dedent("""\ + #include <cblas.h> + int main(int argc, const char *argv[]) + { + double a[4] = {1,2,3,4}; + double b[4] = {5,6,7,8}; + return cblas_ddot(4, a, 1, b, 1) > 10; + }""") + src = os.path.join(tmpdir, 'source.c') + try: + with open(src, 'w') as f: + f.write(s) + + try: + # check we can compile (find headers) + obj = c.compile([src], output_dir=tmpdir, + include_dirs=self.get_include_dirs()) + except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError): + return None + + # check we can link (find library) + # some systems have separate cblas and blas libs.
+ for libs in [info['libraries'], ['cblas'] + info['libraries'], + ['blas'] + info['libraries'], ['cblas'], ['blas']]: + try: + c.link_executable(obj, os.path.join(tmpdir, "a.out"), + libraries=libs, + library_dirs=info['library_dirs'], + extra_postargs=info.get('extra_link_args', [])) + return libs + except distutils.ccompiler.LinkError: + pass + finally: + shutil.rmtree(tmpdir) + return None + + +class openblas_info(blas_info): + section = 'openblas' + dir_env_var = 'OPENBLAS' + _lib_names = ['openblas'] + _require_symbols = [] + notfounderror = BlasNotFoundError + + @property + def symbol_prefix(self): + try: + return self.cp.get(self.section, 'symbol_prefix') + except NoOptionError: + return '' + + @property + def symbol_suffix(self): + try: + return self.cp.get(self.section, 'symbol_suffix') + except NoOptionError: + return '' + + def _calc_info(self): + c = customized_ccompiler() + + lib_dirs = self.get_lib_dirs() + + # Prefer to use libraries over openblas_libs + opt = self.get_option_single('openblas_libs', 'libraries') + openblas_libs = self.get_libs(opt, self._lib_names) + + info = self.check_libs(lib_dirs, openblas_libs, []) + + if c.compiler_type == "msvc" and info is None: + from numpy.distutils.fcompiler import new_fcompiler + f = new_fcompiler(c_compiler=c) + if f and f.compiler_type == 'gnu95': + # Try gfortran-compatible library files + info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs) + # Skip lapack check, we'd need build_ext to do it + skip_symbol_check = True + elif info: + skip_symbol_check = False + info['language'] = 'c' + + if info is None: + return None + + # Add extra info for OpenBLAS + extra_info = self.calc_extra_info() + dict_append(info, **extra_info) + + if not (skip_symbol_check or self.check_symbols(info)): + return None + + info['define_macros'] = [('HAVE_CBLAS', None)] + if self.symbol_prefix: + info['define_macros'] += [('BLAS_SYMBOL_PREFIX', self.symbol_prefix)] + if self.symbol_suffix: + info['define_macros'] += [ + ('BLAS_SYMBOL_SUFFIX', self.symbol_suffix), + ('OPENBLAS_ILP64_NAMING_SCHEME', None), + ] + + return info + + def calc_info(self): + info = self._calc_info() + if info is not None: + self.set_info(**info) + + def check_msvc_gfortran_libs(self, library_dirs, libraries): + # First, find the full path to each library directory + library_paths = [] + for library in libraries: + for library_dir in library_dirs: + # MinGW static ext will be .a + fullpath = os.path.join(library_dir, library + '.a') + if os.path.isfile(fullpath): + library_paths.append(fullpath) + break + else: + return None + + # Generate numpy.distutils virtual static library file + basename = self.__class__.__name__ + tmpdir = os.path.join(os.getcwd(), 'build', basename) + if not os.path.isdir(tmpdir): + os.makedirs(tmpdir) + + info = {'library_dirs': [tmpdir], + 'libraries': [basename], + 'language': 'f77'} + + fake_lib_file = os.path.join(tmpdir, basename + '.fobjects') + fake_clib_file = os.path.join(tmpdir, basename + '.cobjects') + with open(fake_lib_file, 'w') as f: + f.write("\n".join(library_paths)) + with open(fake_clib_file, 'w') as f: + pass + + return info + + def check_symbols(self, info): + res = False + c = customized_ccompiler() + + tmpdir = tempfile.mkdtemp() + + prototypes = "\n".join("void %s%s%s();" % (self.symbol_prefix, + symbol_name, + self.symbol_suffix) + for symbol_name in self._require_symbols) + calls = "\n".join("%s%s%s();" % (self.symbol_prefix, + symbol_name, + self.symbol_suffix) + for symbol_name in self._require_symbols) + s = 
textwrap.dedent("""\ + %(prototypes)s + int main(int argc, const char *argv[]) + { + %(calls)s + return 0; + }""") % dict(prototypes=prototypes, calls=calls) + src = os.path.join(tmpdir, 'source.c') + out = os.path.join(tmpdir, 'a.out') + # Add the additional "extra" arguments + try: + extra_args = info['extra_link_args'] + except Exception: + extra_args = [] + try: + with open(src, 'w') as f: + f.write(s) + obj = c.compile([src], output_dir=tmpdir) + try: + c.link_executable(obj, out, libraries=info['libraries'], + library_dirs=info['library_dirs'], + extra_postargs=extra_args) + res = True + except distutils.ccompiler.LinkError: + res = False + finally: + shutil.rmtree(tmpdir) + return res + +class openblas_lapack_info(openblas_info): + section = 'openblas' + dir_env_var = 'OPENBLAS' + _lib_names = ['openblas'] + _require_symbols = ['zungqr_'] + notfounderror = BlasNotFoundError + +class openblas_clapack_info(openblas_lapack_info): + _lib_names = ['openblas', 'lapack'] + +class openblas_ilp64_info(openblas_info): + section = 'openblas_ilp64' + dir_env_var = 'OPENBLAS_ILP64' + _lib_names = ['openblas64'] + _require_symbols = ['dgemm_', 'cblas_dgemm'] + notfounderror = BlasILP64NotFoundError + + def _calc_info(self): + info = super()._calc_info() + if info is not None: + info['define_macros'] += [('HAVE_BLAS_ILP64', None)] + return info + +class openblas_ilp64_lapack_info(openblas_ilp64_info): + _require_symbols = ['dgemm_', 'cblas_dgemm', 'zungqr_', 'LAPACKE_zungqr'] + + def _calc_info(self): + info = super()._calc_info() + if info: + info['define_macros'] += [('HAVE_LAPACKE', None)] + return info + +class openblas64__info(openblas_ilp64_info): + # ILP64 Openblas, with default symbol suffix + section = 'openblas64_' + dir_env_var = 'OPENBLAS64_' + _lib_names = ['openblas64_'] + symbol_suffix = '64_' + symbol_prefix = '' + +class openblas64__lapack_info(openblas_ilp64_lapack_info, openblas64__info): + pass + +class blis_info(blas_info): + section = 'blis' + dir_env_var = 'BLIS' + _lib_names = ['blis'] + notfounderror = BlasNotFoundError + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + opt = self.get_option_single('blis_libs', 'libraries') + blis_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs2(lib_dirs, blis_libs, []) + if info is None: + return + + # Add include dirs + incl_dirs = self.get_include_dirs() + dict_append(info, + language='c', + define_macros=[('HAVE_CBLAS', None)], + include_dirs=incl_dirs) + self.set_info(**info) + + +class flame_info(system_info): + """ Usage of libflame for LAPACK operations + + This requires libflame to be compiled with lapack wrappers: + + ./configure --enable-lapack2flame ... + + Be aware that libflame 5.1.0 has some missing names in the shared library, so + if you have problems, try the static flame library. 
+ """ + section = 'flame' + _lib_names = ['flame'] + notfounderror = FlameNotFoundError + + def check_embedded_lapack(self, info): + """ libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """ + c = customized_ccompiler() + + tmpdir = tempfile.mkdtemp() + s = textwrap.dedent("""\ + void zungqr_(); + int main(int argc, const char *argv[]) + { + zungqr_(); + return 0; + }""") + src = os.path.join(tmpdir, 'source.c') + out = os.path.join(tmpdir, 'a.out') + # Add the additional "extra" arguments + extra_args = info.get('extra_link_args', []) + try: + with open(src, 'w') as f: + f.write(s) + obj = c.compile([src], output_dir=tmpdir) + try: + c.link_executable(obj, out, libraries=info['libraries'], + library_dirs=info['library_dirs'], + extra_postargs=extra_args) + return True + except distutils.ccompiler.LinkError: + return False + finally: + shutil.rmtree(tmpdir) + + def calc_info(self): + lib_dirs = self.get_lib_dirs() + flame_libs = self.get_libs('libraries', self._lib_names) + + info = self.check_libs2(lib_dirs, flame_libs, []) + if info is None: + return + + # Add the extra flag args to info + extra_info = self.calc_extra_info() + dict_append(info, **extra_info) + + if self.check_embedded_lapack(info): + # check if the user has supplied all information required + self.set_info(**info) + else: + # Try and get the BLAS lib to see if we can get it to work + blas_info = get_info('blas_opt') + if not blas_info: + # since we already failed once, this ain't going to work either + return + + # Now we need to merge the two dictionaries + for key in blas_info: + if isinstance(blas_info[key], list): + info[key] = info.get(key, []) + blas_info[key] + elif isinstance(blas_info[key], tuple): + info[key] = info.get(key, ()) + blas_info[key] + else: + info[key] = info.get(key, '') + blas_info[key] + + # Now check again + if self.check_embedded_lapack(info): + self.set_info(**info) + + +class accelerate_info(system_info): + section = 'accelerate' + _lib_names = ['accelerate', 'veclib'] + notfounderror = BlasNotFoundError + + def calc_info(self): + # Make possible to enable/disable from config file/env var + libraries = os.environ.get('ACCELERATE') + if libraries: + libraries = [libraries] + else: + libraries = self.get_libs('libraries', self._lib_names) + libraries = [lib.strip().lower() for lib in libraries] + + if (sys.platform == 'darwin' and + not os.getenv('_PYTHON_HOST_PLATFORM', None)): + # Use the system BLAS from Accelerate or vecLib under OSX + args = [] + link_args = [] + if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \ + 'x86_64' in get_platform() or \ + 'i386' in platform.platform(): + intel = 1 + else: + intel = 0 + if (os.path.exists('/System/Library/Frameworks' + '/Accelerate.framework/') and + 'accelerate' in libraries): + if intel: + args.extend(['-msse3']) + args.extend([ + '-I/System/Library/Frameworks/vecLib.framework/Headers']) + link_args.extend(['-Wl,-framework', '-Wl,Accelerate']) + elif (os.path.exists('/System/Library/Frameworks' + '/vecLib.framework/') and + 'veclib' in libraries): + if intel: + args.extend(['-msse3']) + args.extend([ + '-I/System/Library/Frameworks/vecLib.framework/Headers']) + link_args.extend(['-Wl,-framework', '-Wl,vecLib']) + + if args: + macros = [ + ('NO_ATLAS_INFO', 3), + ('HAVE_CBLAS', None), + ('ACCELERATE_NEW_LAPACK', None), + ] + if(os.getenv('NPY_USE_BLAS_ILP64', None)): + print('Setting HAVE_BLAS_ILP64') + macros += [ + ('HAVE_BLAS_ILP64', None), + ('ACCELERATE_LAPACK_ILP64', None), + ] + 
self.set_info(extra_compile_args=args, + extra_link_args=link_args, + define_macros=macros) + + return + +class accelerate_lapack_info(accelerate_info): + def _calc_info(self): + return super()._calc_info() + +class blas_src_info(system_info): + # BLAS_SRC is deprecated, please do not use this! + # Build or install a BLAS library via your package manager or from + # source separately. + section = 'blas_src' + dir_env_var = 'BLAS_SRC' + notfounderror = BlasSrcNotFoundError + + def get_paths(self, section, key): + pre_dirs = system_info.get_paths(self, section, key) + dirs = [] + for d in pre_dirs: + dirs.extend([d] + self.combine_paths(d, ['blas'])) + return [d for d in dirs if os.path.isdir(d)] + + def calc_info(self): + src_dirs = self.get_src_dirs() + src_dir = '' + for d in src_dirs: + if os.path.isfile(os.path.join(d, 'daxpy.f')): + src_dir = d + break + if not src_dir: + #XXX: Get sources from netlib. May be ask first. + return + blas1 = ''' + caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot + dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2 + srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg + dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax + snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap + scabs1 + ''' + blas2 = ''' + cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv + chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv + dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv + sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger + stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc + zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2 + ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv + ''' + blas3 = ''' + cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k + dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm + ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm + ''' + sources = [os.path.join(src_dir, f + '.f') \ + for f in (blas1 + blas2 + blas3).split()] + #XXX: should we check here actual existence of source files? 
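+ # (The list comprehension just below performs exactly that existence
+ #  filter, mirroring the same check in lapack_src_info.calc_info.)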
+ sources = [f for f in sources if os.path.isfile(f)] + info = {'sources': sources, 'language': 'f77'} + self.set_info(**info) + + +class x11_info(system_info): + section = 'x11' + notfounderror = X11NotFoundError + _lib_names = ['X11'] + + def __init__(self): + system_info.__init__(self, + default_lib_dirs=default_x11_lib_dirs, + default_include_dirs=default_x11_include_dirs) + + def calc_info(self): + if sys.platform in ['win32']: + return + lib_dirs = self.get_lib_dirs() + include_dirs = self.get_include_dirs() + opt = self.get_option_single('x11_libs', 'libraries') + x11_libs = self.get_libs(opt, self._lib_names) + info = self.check_libs(lib_dirs, x11_libs, []) + if info is None: + return + inc_dir = None + for d in include_dirs: + if self.combine_paths(d, 'X11/X.h'): + inc_dir = d + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir]) + self.set_info(**info) + + +class _numpy_info(system_info): + section = 'Numeric' + modulename = 'Numeric' + notfounderror = NumericNotFoundError + + def __init__(self): + include_dirs = [] + try: + module = __import__(self.modulename) + prefix = [] + for name in module.__file__.split(os.sep): + if name == 'lib': + break + prefix.append(name) + + # Ask numpy for its own include path before attempting + # anything else + try: + include_dirs.append(getattr(module, 'get_include')()) + except AttributeError: + pass + + include_dirs.append(sysconfig.get_path('include')) + except ImportError: + pass + py_incl_dir = sysconfig.get_path('include') + include_dirs.append(py_incl_dir) + py_pincl_dir = sysconfig.get_path('platinclude') + if py_pincl_dir not in include_dirs: + include_dirs.append(py_pincl_dir) + for d in default_include_dirs: + d = os.path.join(d, os.path.basename(py_incl_dir)) + if d not in include_dirs: + include_dirs.append(d) + system_info.__init__(self, + default_lib_dirs=[], + default_include_dirs=include_dirs) + + def calc_info(self): + try: + module = __import__(self.modulename) + except ImportError: + return + info = {} + macros = [] + for v in ['__version__', 'version']: + vrs = getattr(module, v, None) + if vrs is None: + continue + macros = [(self.modulename.upper() + '_VERSION', + _c_string_literal(vrs)), + (self.modulename.upper(), None)] + break + dict_append(info, define_macros=macros) + include_dirs = self.get_include_dirs() + inc_dir = None + for d in include_dirs: + if self.combine_paths(d, + os.path.join(self.modulename, + 'arrayobject.h')): + inc_dir = d + break + if inc_dir is not None: + dict_append(info, include_dirs=[inc_dir]) + if info: + self.set_info(**info) + return + + +class numarray_info(_numpy_info): + section = 'numarray' + modulename = 'numarray' + + +class Numeric_info(_numpy_info): + section = 'Numeric' + modulename = 'Numeric' + + +class numpy_info(_numpy_info): + section = 'numpy' + modulename = 'numpy' + + +class numerix_info(system_info): + section = 'numerix' + + def calc_info(self): + which = None, None + if os.getenv("NUMERIX"): + which = os.getenv("NUMERIX"), "environment var" + # If all the above fail, default to numpy. 
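+ # (Illustrative: setting NUMERIX=numpy in the environment is the intended
+ #  override; the value is lower-cased and validated a few lines below.)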
+
+
+class f2py_info(system_info):
+    def calc_info(self):
+        try:
+            import numpy.f2py as f2py
+        except ImportError:
+            return
+        f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src')
+        self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')],
+                      include_dirs=[f2py_dir])
+        return
+
+
+class boost_python_info(system_info):
+    section = 'boost_python'
+    dir_env_var = 'BOOST'
+
+    def get_paths(self, section, key):
+        pre_dirs = system_info.get_paths(self, section, key)
+        dirs = []
+        for d in pre_dirs:
+            dirs.extend([d] + self.combine_paths(d, ['boost*']))
+        return [d for d in dirs if os.path.isdir(d)]
+
+    def calc_info(self):
+        src_dirs = self.get_src_dirs()
+        src_dir = ''
+        for d in src_dirs:
+            if os.path.isfile(os.path.join(d, 'libs', 'python', 'src',
+                                           'module.cpp')):
+                src_dir = d
+                break
+        if not src_dir:
+            return
+        py_incl_dirs = [sysconfig.get_path('include')]
+        py_pincl_dir = sysconfig.get_path('platinclude')
+        if py_pincl_dir not in py_incl_dirs:
+            py_incl_dirs.append(py_pincl_dir)
+        srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')
+        bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp'))
+        bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp'))
+        info = {'libraries': [('boost_python_src',
+                               {'include_dirs': [src_dir] + py_incl_dirs,
+                                'sources':bpl_srcs}
+                              )],
+                'include_dirs': [src_dir],
+                }
+        if info:
+            self.set_info(**info)
+        return
+
+
+class agg2_info(system_info):
+    section = 'agg2'
+    dir_env_var = 'AGG2'
+
+    def get_paths(self, section, key):
+        pre_dirs = system_info.get_paths(self, section, key)
+        dirs = []
+        for d in pre_dirs:
+            dirs.extend([d] + self.combine_paths(d, ['agg2*']))
+        return [d for d in dirs if os.path.isdir(d)]
+
+    def calc_info(self):
+        src_dirs = self.get_src_dirs()
+        src_dir = ''
+        for d in src_dirs:
+            if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')):
+                src_dir = d
+                break
+        if not src_dir:
+            return
+        if sys.platform == 'win32':
+            agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform',
+                                          'win32', 'agg_win32_bmp.cpp'))
+        else:
+            agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp'))
+            agg2_srcs += [os.path.join(src_dir, 'src', 'platform',
+                                       'X11',
+                                       'agg_platform_support.cpp')]
+
+        info = {'libraries':
+                [('agg2_src',
+                  {'sources': agg2_srcs,
+                   'include_dirs': [os.path.join(src_dir, 'include')],
+                  }
+                 )],
+                'include_dirs': [os.path.join(src_dir, 'include')],
+                }
+        if info:
+            self.set_info(**info)
+        return
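+
+# Editor's note: blas_src_info, boost_python_info and agg2_info all share one
+# "build from a source checkout" pattern: probe candidate directories for a
+# single landmark file, then collect sources relative to the first hit. A
+# minimal sketch of that pattern (not part of numpy; paths are hypothetical):
+def _example_find_src_dir(candidates=('/opt/agg2', '/usr/src/agg2')):
+    for d in candidates:
+        if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')):
+            return d  # landmark found; treat d as the source root
+    return ''  # no checkout found; the calc_info methods simply return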
+
+
+class _pkg_config_info(system_info):
+    section = None
+    config_env_var = 'PKG_CONFIG'
+    default_config_exe = 'pkg-config'
+    append_config_exe = ''
+    version_macro_name = None
+    release_macro_name = None
+    version_flag = '--modversion'
+    cflags_flag = '--cflags'
+
+    def get_config_exe(self):
+        if self.config_env_var in os.environ:
+            return os.environ[self.config_env_var]
+        return self.default_config_exe
+
+    def get_config_output(self, config_exe, option):
+        cmd = config_exe + ' ' + self.append_config_exe + ' ' + option
+        try:
+            o = subprocess.check_output(cmd)
+        except (OSError, subprocess.CalledProcessError):
+            pass
+        else:
+            o = filepath_from_subprocess_output(o)
+            return o
+
+    def calc_info(self):
+        config_exe = find_executable(self.get_config_exe())
+        if not config_exe:
+            log.warn('File not found: %s. Cannot determine %s info.' \
+                  % (config_exe, self.section))
+            return
+        info = {}
+        macros = []
+        libraries = []
+        library_dirs = []
+        include_dirs = []
+        extra_link_args = []
+        extra_compile_args = []
+        version = self.get_config_output(config_exe, self.version_flag)
+        if version:
+            macros.append((self.__class__.__name__.split('.')[-1].upper(),
+                           _c_string_literal(version)))
+            if self.version_macro_name:
+                macros.append((self.version_macro_name + '_%s'
+                               % (version.replace('.', '_')), None))
+        if self.release_macro_name:
+            release = self.get_config_output(config_exe, '--release')
+            if release:
+                macros.append((self.release_macro_name + '_%s'
+                               % (release.replace('.', '_')), None))
+        opts = self.get_config_output(config_exe, '--libs')
+        if opts:
+            for opt in opts.split():
+                if opt[:2] == '-l':
+                    libraries.append(opt[2:])
+                elif opt[:2] == '-L':
+                    library_dirs.append(opt[2:])
+                else:
+                    extra_link_args.append(opt)
+        opts = self.get_config_output(config_exe, self.cflags_flag)
+        if opts:
+            for opt in opts.split():
+                if opt[:2] == '-I':
+                    include_dirs.append(opt[2:])
+                elif opt[:2] == '-D':
+                    if '=' in opt:
+                        n, v = opt[2:].split('=')
+                        macros.append((n, v))
+                    else:
+                        macros.append((opt[2:], None))
+                else:
+                    extra_compile_args.append(opt)
+        if macros:
+            dict_append(info, define_macros=macros)
+        if libraries:
+            dict_append(info, libraries=libraries)
+        if library_dirs:
+            dict_append(info, library_dirs=library_dirs)
+        if include_dirs:
+            dict_append(info, include_dirs=include_dirs)
+        if extra_link_args:
+            dict_append(info, extra_link_args=extra_link_args)
+        if extra_compile_args:
+            dict_append(info, extra_compile_args=extra_compile_args)
+        if info:
+            self.set_info(**info)
+        return
+
+
+class wx_info(_pkg_config_info):
+    section = 'wx'
+    config_env_var = 'WX_CONFIG'
+    default_config_exe = 'wx-config'
+    append_config_exe = ''
+    version_macro_name = 'WX_VERSION'
+    release_macro_name = 'WX_RELEASE'
+    version_flag = '--version'
+    cflags_flag = '--cxxflags'
+
+
+class gdk_pixbuf_xlib_2_info(_pkg_config_info):
+    section = 'gdk_pixbuf_xlib_2'
+    append_config_exe = 'gdk-pixbuf-xlib-2.0'
+    version_macro_name = 'GDK_PIXBUF_XLIB_VERSION'
+
+
+class gdk_pixbuf_2_info(_pkg_config_info):
+    section = 'gdk_pixbuf_2'
+    append_config_exe = 'gdk-pixbuf-2.0'
+    version_macro_name = 'GDK_PIXBUF_VERSION'
+
+
+class gdk_x11_2_info(_pkg_config_info):
+    section = 'gdk_x11_2'
+    append_config_exe = 'gdk-x11-2.0'
+    version_macro_name = 'GDK_X11_VERSION'
+
+
+class gdk_2_info(_pkg_config_info):
+    section = 'gdk_2'
+    append_config_exe = 'gdk-2.0'
+    version_macro_name = 'GDK_VERSION'
+
+
+class gdk_info(_pkg_config_info):
+    section = 'gdk'
+    append_config_exe = 'gdk'
+    version_macro_name = 'GDK_VERSION'
+
+
+class gtkp_x11_2_info(_pkg_config_info):
+    section = 'gtkp_x11_2'
+    append_config_exe = 'gtk+-x11-2.0'
+    version_macro_name = 'GTK_X11_VERSION'
+
+
+class gtkp_2_info(_pkg_config_info):
+    section = 'gtkp_2'
+    append_config_exe = 'gtk+-2.0'
+    version_macro_name = 'GTK_VERSION'
+
+
+class xft_info(_pkg_config_info):
+    section = 'xft'
+    append_config_exe = 'xft'
+    version_macro_name = 'XFT_VERSION'
+
+
+class freetype2_info(_pkg_config_info):
+    section = 'freetype2'
+    append_config_exe = 'freetype2'
+    version_macro_name = 'FREETYPE2_VERSION'
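+
+# Editor's note: illustrative sketch, not part of numpy, of the flag
+# classification done in _pkg_config_info.calc_info above: `--libs` output
+# splits into -l / -L / other link args, `--cflags` into -I / -D / other
+# compile args.
+def _example_split_pkg_config_flags(libs_out, cflags_out):
+    libraries = [o[2:] for o in libs_out.split() if o[:2] == '-l']
+    library_dirs = [o[2:] for o in libs_out.split() if o[:2] == '-L']
+    include_dirs = [o[2:] for o in cflags_out.split() if o[:2] == '-I']
+    macros = []
+    for o in cflags_out.split():
+        if o[:2] == '-D':
+            # -DNAME=VALUE -> (NAME, VALUE); bare -DNAME -> (NAME, None)
+            macros.append(tuple(o[2:].split('=')) if '=' in o else (o[2:], None))
+    return libraries, library_dirs, include_dirs, macros
+
+# e.g. _example_split_pkg_config_flags('-L/usr/lib -lfreetype',
+#                                      '-I/usr/include/freetype2')
+# -> (['freetype'], ['/usr/lib'], ['/usr/include/freetype2'], [])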
+
+
+class amd_info(system_info):
+    section = 'amd'
+    dir_env_var = 'AMD'
+    _lib_names = ['amd']
+
+    def calc_info(self):
+        lib_dirs = self.get_lib_dirs()
+
+        opt = self.get_option_single('amd_libs', 'libraries')
+        amd_libs = self.get_libs(opt, self._lib_names)
+        info = self.check_libs(lib_dirs, amd_libs, [])
+        if info is None:
+            return
+
+        include_dirs = self.get_include_dirs()
+
+        inc_dir = None
+        for d in include_dirs:
+            p = self.combine_paths(d, 'amd.h')
+            if p:
+                inc_dir = os.path.dirname(p[0])
+                break
+        if inc_dir is not None:
+            dict_append(info, include_dirs=[inc_dir],
+                        define_macros=[('SCIPY_AMD_H', None)],
+                        swig_opts=['-I' + inc_dir])
+
+        self.set_info(**info)
+        return
+
+
+class umfpack_info(system_info):
+    section = 'umfpack'
+    dir_env_var = 'UMFPACK'
+    notfounderror = UmfpackNotFoundError
+    _lib_names = ['umfpack']
+
+    def calc_info(self):
+        lib_dirs = self.get_lib_dirs()
+
+        opt = self.get_option_single('umfpack_libs', 'libraries')
+        umfpack_libs = self.get_libs(opt, self._lib_names)
+        info = self.check_libs(lib_dirs, umfpack_libs, [])
+        if info is None:
+            return
+
+        include_dirs = self.get_include_dirs()
+
+        inc_dir = None
+        for d in include_dirs:
+            p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h')
+            if p:
+                inc_dir = os.path.dirname(p[0])
+                break
+        if inc_dir is not None:
+            dict_append(info, include_dirs=[inc_dir],
+                        define_macros=[('SCIPY_UMFPACK_H', None)],
+                        swig_opts=['-I' + inc_dir])
+
+        dict_append(info, **get_info('amd'))
+
+        self.set_info(**info)
+        return
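+
+# Editor's note: amd_info and umfpack_info above share one shape: locate the
+# libraries, then probe the include dirs for a header and, on success, attach
+# include_dirs plus a SCIPY_*_H define. A condensed sketch (not part of numpy;
+# the header name and macro are the amd_info ones):
+def _example_probe_header(include_dirs, header='amd.h'):
+    for d in include_dirs:
+        if os.path.isfile(os.path.join(d, header)):
+            # mirrors dict_append(info, include_dirs=..., define_macros=...)
+            return {'include_dirs': [d],
+                    'define_macros': [('SCIPY_AMD_H', None)]}
+    return {}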
+ """ + r = [] + for a in args: + if not a: + continue + if is_string(a): + a = [a] + r.append(a) + args = r + if not args: + return [] + if len(args) == 1: + result = reduce(lambda a, b: a + b, map(glob, args[0]), []) + elif len(args) == 2: + result = [] + for a0 in args[0]: + for a1 in args[1]: + result.extend(glob(os.path.join(a0, a1))) + else: + result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:])) + log.debug('(paths: %s)', ','.join(result)) + return result + +language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3} +inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'} + + +def dict_append(d, **kws): + languages = [] + for k, v in kws.items(): + if k == 'language': + languages.append(v) + continue + if k in d: + if k in ['library_dirs', 'include_dirs', + 'extra_compile_args', 'extra_link_args', + 'runtime_library_dirs', 'define_macros']: + [d[k].append(vv) for vv in v if vv not in d[k]] + else: + d[k].extend(v) + else: + d[k] = v + if languages: + l = inv_language_map[max([language_map.get(l, 0) for l in languages])] + d['language'] = l + return + + +def parseCmdLine(argv=(None,)): + import optparse + parser = optparse.OptionParser("usage: %prog [-v] [info objs]") + parser.add_option('-v', '--verbose', action='store_true', dest='verbose', + default=False, + help='be verbose and print more messages') + + opts, args = parser.parse_args(args=argv[1:]) + return opts, args + + +def show_all(argv=None): + import inspect + if argv is None: + argv = sys.argv + opts, args = parseCmdLine(argv) + if opts.verbose: + log.set_threshold(log.DEBUG) + else: + log.set_threshold(log.INFO) + show_only = [] + for n in args: + if n[-5:] != '_info': + n = n + '_info' + show_only.append(n) + show_all = not show_only + _gdict_ = globals().copy() + for name, c in _gdict_.items(): + if not inspect.isclass(c): + continue + if not issubclass(c, system_info) or c is system_info: + continue + if not show_all: + if name not in show_only: + continue + del show_only[show_only.index(name)] + conf = c() + conf.verbosity = 2 + # we don't need the result, but we want + # the side effect of printing diagnostics + conf.get_info() + if show_only: + log.info('Info classes not defined: %s', ','.join(show_only)) + +if __name__ == "__main__": + show_all() diff --git a/.venv/lib/python3.11/site-packages/numpy/distutils/unixccompiler.py b/.venv/lib/python3.11/site-packages/numpy/distutils/unixccompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..4884960fdf227497df644b71b129ce561e3b49e0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/numpy/distutils/unixccompiler.py @@ -0,0 +1,141 @@ +""" +unixccompiler - can handle very long argument lists for ar. 
+ +""" +import os +import sys +import subprocess +import shlex + +from distutils.errors import CompileError, DistutilsExecError, LibError +from distutils.unixccompiler import UnixCCompiler +from numpy.distutils.ccompiler import replace_method +from numpy.distutils.misc_util import _commandline_dep_string +from numpy.distutils import log + +# Note that UnixCCompiler._compile appeared in Python 2.3 +def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): + """Compile a single source files with a Unix-style compiler.""" + # HP ad-hoc fix, see ticket 1383 + ccomp = self.compiler_so + if ccomp[0] == 'aCC': + # remove flags that will trigger ANSI-C mode for aCC + if '-Ae' in ccomp: + ccomp.remove('-Ae') + if '-Aa' in ccomp: + ccomp.remove('-Aa') + # add flags for (almost) sane C++ handling + ccomp += ['-AA'] + self.compiler_so = ccomp + # ensure OPT environment variable is read + if 'OPT' in os.environ: + # XXX who uses this? + from sysconfig import get_config_vars + opt = shlex.join(shlex.split(os.environ['OPT'])) + gcv_opt = shlex.join(shlex.split(get_config_vars('OPT')[0])) + ccomp_s = shlex.join(self.compiler_so) + if opt not in ccomp_s: + ccomp_s = ccomp_s.replace(gcv_opt, opt) + self.compiler_so = shlex.split(ccomp_s) + llink_s = shlex.join(self.linker_so) + if opt not in llink_s: + self.linker_so = self.linker_so + shlex.split(opt) + + display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src) + + # gcc style automatic dependencies, outputs a makefile (-MF) that lists + # all headers needed by a c file as a side effect of compilation (-MMD) + if getattr(self, '_auto_depends', False): + deps = ['-MMD', '-MF', obj + '.d'] + else: + deps = [] + + try: + self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps + + extra_postargs, display = display) + except DistutilsExecError as e: + msg = str(e) + raise CompileError(msg) from None + + # add commandline flags to dependency file + if deps: + # After running the compiler, the file created will be in EBCDIC + # but will not be tagged as such. This tags it so the file does not + # have multiple different encodings being written to it + if sys.platform == 'zos': + subprocess.check_output(['chtag', '-tc', 'IBM1047', obj + '.d']) + with open(obj + '.d', 'a') as f: + f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts)) + +replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile) + + +def UnixCCompiler_create_static_lib(self, objects, output_libname, + output_dir=None, debug=0, target_lang=None): + """ + Build a static library in a separate sub-process. + + Parameters + ---------- + objects : list or tuple of str + List of paths to object files used to build the static library. + output_libname : str + The library name as an absolute or relative (if `output_dir` is used) + path. + output_dir : str, optional + The path to the output directory. Default is None, in which case + the ``output_dir`` attribute of the UnixCCompiler instance. + debug : bool, optional + This parameter is not used. + target_lang : str, optional + This parameter is not used. + + Returns + ------- + None + + """ + objects, output_dir = self._fix_object_args(objects, output_dir) + + output_filename = \ + self.library_filename(output_libname, output_dir=output_dir) + + if self._need_link(objects, output_filename): + try: + # previous .a may be screwed up; best to remove it first + # and recreate. 
+
+
+def UnixCCompiler_create_static_lib(self, objects, output_libname,
+                                    output_dir=None, debug=0, target_lang=None):
+    """
+    Build a static library in a separate sub-process.
+
+    Parameters
+    ----------
+    objects : list or tuple of str
+        List of paths to object files used to build the static library.
+    output_libname : str
+        The library name as an absolute or relative (if `output_dir` is used)
+        path.
+    output_dir : str, optional
+        The path to the output directory. Default is None, in which case
+        the ``output_dir`` attribute of the UnixCCompiler instance is used.
+    debug : bool, optional
+        This parameter is not used.
+    target_lang : str, optional
+        This parameter is not used.
+
+    Returns
+    -------
+    None
+
+    """
+    objects, output_dir = self._fix_object_args(objects, output_dir)
+
+    output_filename = \
+        self.library_filename(output_libname, output_dir=output_dir)
+
+    if self._need_link(objects, output_filename):
+        try:
+            # previous .a may be screwed up; best to remove it first
+            # and recreate.
+            # Also, ar on OS X doesn't handle updating universal archives
+            os.unlink(output_filename)
+        except OSError:
+            pass
+        self.mkpath(os.path.dirname(output_filename))
+        tmp_objects = objects + self.objects
+        while tmp_objects:
+            objects = tmp_objects[:50]
+            tmp_objects = tmp_objects[50:]
+            display = '%s: adding %d object files to %s' % (
+                os.path.basename(self.archiver[0]),
+                len(objects), output_filename)
+            self.spawn(self.archiver + [output_filename] + objects,
+                       display = display)
+
+        # Not many Unices require ranlib anymore -- SunOS 4.x is, I
+        # think, the only major Unix that does. Maybe we need some
+        # platform intelligence here to skip ranlib if it's not
+        # needed -- or maybe Python's configure script took care of
+        # it for us, hence the check for leading colon.
+        if self.ranlib:
+            display = '%s:@ %s' % (os.path.basename(self.ranlib[0]),
+                                   output_filename)
+            try:
+                self.spawn(self.ranlib + [output_filename],
+                           display = display)
+            except DistutilsExecError as e:
+                msg = str(e)
+                raise LibError(msg) from None
+    else:
+        log.debug("skipping %s (up-to-date)", output_filename)
+    return
+
+replace_method(UnixCCompiler, 'create_static_lib',
+               UnixCCompiler_create_static_lib)
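+
+# Editor's note: illustrative sketch, not part of numpy. The archiver loop in
+# UnixCCompiler_create_static_lib above feeds `ar` at most 50 object files per
+# invocation to stay clear of command-line length limits; the slicing pattern
+# in isolation:
+def _example_chunks(objects, size=50):
+    while objects:
+        batch, objects = objects[:size], objects[size:]
+        yield batch  # each batch becomes one `ar <archive> obj...` call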