Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- evalkit_llava/lib/python3.10/configparser.py +1368 -0
- evalkit_llava/lib/python3.10/copy.py +304 -0
- evalkit_llava/lib/python3.10/difflib.py +2056 -0
- evalkit_llava/lib/python3.10/distutils/README +11 -0
- evalkit_llava/lib/python3.10/distutils/__init__.py +20 -0
- evalkit_llava/lib/python3.10/distutils/_msvccompiler.py +546 -0
- evalkit_llava/lib/python3.10/distutils/archive_util.py +256 -0
- evalkit_llava/lib/python3.10/distutils/cmd.py +403 -0
- evalkit_llava/lib/python3.10/distutils/config.py +130 -0
- evalkit_llava/lib/python3.10/distutils/debug.py +5 -0
- evalkit_llava/lib/python3.10/distutils/dep_util.py +92 -0
- evalkit_llava/lib/python3.10/distutils/dist.py +1256 -0
- evalkit_llava/lib/python3.10/distutils/extension.py +241 -0
- evalkit_llava/lib/python3.10/distutils/fancy_getopt.py +457 -0
- evalkit_llava/lib/python3.10/distutils/file_util.py +238 -0
- evalkit_llava/lib/python3.10/distutils/filelist.py +327 -0
- evalkit_llava/lib/python3.10/distutils/log.py +77 -0
- evalkit_llava/lib/python3.10/distutils/msvccompiler.py +643 -0
- evalkit_llava/lib/python3.10/distutils/sysconfig.py +353 -0
- evalkit_llava/lib/python3.10/distutils/tests/Setup.sample +67 -0
- evalkit_llava/lib/python3.10/distutils/tests/__init__.py +41 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_archive_util.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_bdist_dumb.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_bdist_rpm.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_build.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_build_ext.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_build_py.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_build_scripts.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_cmd.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_config.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_core.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_cygwinccompiler.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_dep_util.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_dir_util.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_extension.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_file_util.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_install_data.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_install_headers.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_msvccompiler.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_sdist.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_sysconfig.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_upload.cpython-310.pyc +0 -0
- evalkit_llava/lib/python3.10/distutils/tests/test_archive_util.py +396 -0
- evalkit_llava/lib/python3.10/distutils/tests/test_bdist.py +56 -0
- evalkit_llava/lib/python3.10/distutils/tests/test_build_ext.py +553 -0
- evalkit_llava/lib/python3.10/distutils/tests/test_build_py.py +179 -0
- evalkit_llava/lib/python3.10/distutils/tests/test_build_scripts.py +112 -0
- evalkit_llava/lib/python3.10/distutils/tests/test_check.py +163 -0
- evalkit_llava/lib/python3.10/distutils/tests/test_cmd.py +126 -0
- evalkit_llava/lib/python3.10/distutils/tests/test_file_util.py +124 -0
evalkit_llava/lib/python3.10/configparser.py
ADDED
|
@@ -0,0 +1,1368 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Configuration file parser.
|
| 2 |
+
|
| 3 |
+
A configuration file consists of sections, lead by a "[section]" header,
|
| 4 |
+
and followed by "name: value" entries, with continuations and such in
|
| 5 |
+
the style of RFC 822.
|
| 6 |
+
|
| 7 |
+
Intrinsic defaults can be specified by passing them into the
|
| 8 |
+
ConfigParser constructor as a dictionary.
|
| 9 |
+
|
| 10 |
+
class:
|
| 11 |
+
|
| 12 |
+
ConfigParser -- responsible for parsing a list of
|
| 13 |
+
configuration files, and managing the parsed database.
|
| 14 |
+
|
| 15 |
+
methods:
|
| 16 |
+
|
| 17 |
+
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
|
| 18 |
+
delimiters=('=', ':'), comment_prefixes=('#', ';'),
|
| 19 |
+
inline_comment_prefixes=None, strict=True,
|
| 20 |
+
empty_lines_in_values=True, default_section='DEFAULT',
|
| 21 |
+
interpolation=<unset>, converters=<unset>):
|
| 22 |
+
|
| 23 |
+
Create the parser. When `defaults` is given, it is initialized into the
|
| 24 |
+
dictionary or intrinsic defaults. The keys must be strings, the values
|
| 25 |
+
must be appropriate for %()s string interpolation.
|
| 26 |
+
|
| 27 |
+
When `dict_type` is given, it will be used to create the dictionary
|
| 28 |
+
objects for the list of sections, for the options within a section, and
|
| 29 |
+
for the default values.
|
| 30 |
+
|
| 31 |
+
When `delimiters` is given, it will be used as the set of substrings
|
| 32 |
+
that divide keys from values.
|
| 33 |
+
|
| 34 |
+
When `comment_prefixes` is given, it will be used as the set of
|
| 35 |
+
substrings that prefix comments in empty lines. Comments can be
|
| 36 |
+
indented.
|
| 37 |
+
|
| 38 |
+
When `inline_comment_prefixes` is given, it will be used as the set of
|
| 39 |
+
substrings that prefix comments in non-empty lines.
|
| 40 |
+
|
| 41 |
+
When `strict` is True, the parser won't allow for any section or option
|
| 42 |
+
duplicates while reading from a single source (file, string or
|
| 43 |
+
dictionary). Default is True.
|
| 44 |
+
|
| 45 |
+
When `empty_lines_in_values` is False (default: True), each empty line
|
| 46 |
+
marks the end of an option. Otherwise, internal empty lines of
|
| 47 |
+
a multiline option are kept as part of the value.
|
| 48 |
+
|
| 49 |
+
When `allow_no_value` is True (default: False), options without
|
| 50 |
+
values are accepted; the value presented for these is None.
|
| 51 |
+
|
| 52 |
+
When `default_section` is given, the name of the special section is
|
| 53 |
+
named accordingly. By default it is called ``"DEFAULT"`` but this can
|
| 54 |
+
be customized to point to any other valid section name. Its current
|
| 55 |
+
value can be retrieved using the ``parser_instance.default_section``
|
| 56 |
+
attribute and may be modified at runtime.
|
| 57 |
+
|
| 58 |
+
When `interpolation` is given, it should be an Interpolation subclass
|
| 59 |
+
instance. It will be used as the handler for option value
|
| 60 |
+
pre-processing when using getters. RawConfigParser objects don't do
|
| 61 |
+
any sort of interpolation, whereas ConfigParser uses an instance of
|
| 62 |
+
BasicInterpolation. The library also provides a ``zc.buildbot``
|
| 63 |
+
inspired ExtendedInterpolation implementation.
|
| 64 |
+
|
| 65 |
+
When `converters` is given, it should be a dictionary where each key
|
| 66 |
+
represents the name of a type converter and each value is a callable
|
| 67 |
+
implementing the conversion from string to the desired datatype. Every
|
| 68 |
+
converter gets its corresponding get*() method on the parser object and
|
| 69 |
+
section proxies.
|
| 70 |
+
|
| 71 |
+
sections()
|
| 72 |
+
Return all the configuration section names, sans DEFAULT.
|
| 73 |
+
|
| 74 |
+
has_section(section)
|
| 75 |
+
Return whether the given section exists.
|
| 76 |
+
|
| 77 |
+
has_option(section, option)
|
| 78 |
+
Return whether the given option exists in the given section.
|
| 79 |
+
|
| 80 |
+
options(section)
|
| 81 |
+
Return list of configuration options for the named section.
|
| 82 |
+
|
| 83 |
+
read(filenames, encoding=None)
|
| 84 |
+
Read and parse the iterable of named configuration files, given by
|
| 85 |
+
name. A single filename is also allowed. Non-existing files
|
| 86 |
+
are ignored. Return list of successfully read files.
|
| 87 |
+
|
| 88 |
+
read_file(f, filename=None)
|
| 89 |
+
Read and parse one configuration file, given as a file object.
|
| 90 |
+
The filename defaults to f.name; it is only used in error
|
| 91 |
+
messages (if f has no `name` attribute, the string `<???>` is used).
|
| 92 |
+
|
| 93 |
+
read_string(string)
|
| 94 |
+
Read configuration from a given string.
|
| 95 |
+
|
| 96 |
+
read_dict(dictionary)
|
| 97 |
+
Read configuration from a dictionary. Keys are section names,
|
| 98 |
+
values are dictionaries with keys and values that should be present
|
| 99 |
+
in the section. If the used dictionary type preserves order, sections
|
| 100 |
+
and their keys will be added in order. Values are automatically
|
| 101 |
+
converted to strings.
|
| 102 |
+
|
| 103 |
+
get(section, option, raw=False, vars=None, fallback=_UNSET)
|
| 104 |
+
Return a string value for the named option. All % interpolations are
|
| 105 |
+
expanded in the return values, based on the defaults passed into the
|
| 106 |
+
constructor and the DEFAULT section. Additional substitutions may be
|
| 107 |
+
provided using the `vars` argument, which must be a dictionary whose
|
| 108 |
+
contents override any pre-existing defaults. If `option` is a key in
|
| 109 |
+
`vars`, the value from `vars` is used.
|
| 110 |
+
|
| 111 |
+
getint(section, options, raw=False, vars=None, fallback=_UNSET)
|
| 112 |
+
Like get(), but convert value to an integer.
|
| 113 |
+
|
| 114 |
+
getfloat(section, options, raw=False, vars=None, fallback=_UNSET)
|
| 115 |
+
Like get(), but convert value to a float.
|
| 116 |
+
|
| 117 |
+
getboolean(section, options, raw=False, vars=None, fallback=_UNSET)
|
| 118 |
+
Like get(), but convert value to a boolean (currently case
|
| 119 |
+
insensitively defined as 0, false, no, off for False, and 1, true,
|
| 120 |
+
yes, on for True). Returns False or True.
|
| 121 |
+
|
| 122 |
+
items(section=_UNSET, raw=False, vars=None)
|
| 123 |
+
If section is given, return a list of tuples with (name, value) for
|
| 124 |
+
each option in the section. Otherwise, return a list of tuples with
|
| 125 |
+
(section_name, section_proxy) for each section, including DEFAULTSECT.
|
| 126 |
+
|
| 127 |
+
remove_section(section)
|
| 128 |
+
Remove the given file section and all its options.
|
| 129 |
+
|
| 130 |
+
remove_option(section, option)
|
| 131 |
+
Remove the given option from the given section.
|
| 132 |
+
|
| 133 |
+
set(section, option, value)
|
| 134 |
+
Set the given option.
|
| 135 |
+
|
| 136 |
+
write(fp, space_around_delimiters=True)
|
| 137 |
+
Write the configuration state in .ini format. If
|
| 138 |
+
`space_around_delimiters` is True (the default), delimiters
|
| 139 |
+
between keys and values are surrounded by spaces.
|
| 140 |
+
"""
|
| 141 |
+
|
| 142 |
+
from collections.abc import MutableMapping
|
| 143 |
+
from collections import ChainMap as _ChainMap
|
| 144 |
+
import functools
|
| 145 |
+
import io
|
| 146 |
+
import itertools
|
| 147 |
+
import os
|
| 148 |
+
import re
|
| 149 |
+
import sys
|
| 150 |
+
import warnings
|
| 151 |
+
|
| 152 |
+
__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError",
|
| 153 |
+
"NoOptionError", "InterpolationError", "InterpolationDepthError",
|
| 154 |
+
"InterpolationMissingOptionError", "InterpolationSyntaxError",
|
| 155 |
+
"ParsingError", "MissingSectionHeaderError",
|
| 156 |
+
"ConfigParser", "SafeConfigParser", "RawConfigParser",
|
| 157 |
+
"Interpolation", "BasicInterpolation", "ExtendedInterpolation",
|
| 158 |
+
"LegacyInterpolation", "SectionProxy", "ConverterMapping",
|
| 159 |
+
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
|
| 160 |
+
|
| 161 |
+
_default_dict = dict
|
| 162 |
+
DEFAULTSECT = "DEFAULT"
|
| 163 |
+
|
| 164 |
+
MAX_INTERPOLATION_DEPTH = 10
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
# exception classes
|
| 169 |
+
class Error(Exception):
|
| 170 |
+
"""Base class for ConfigParser exceptions."""
|
| 171 |
+
|
| 172 |
+
def __init__(self, msg=''):
|
| 173 |
+
self.message = msg
|
| 174 |
+
Exception.__init__(self, msg)
|
| 175 |
+
|
| 176 |
+
def __repr__(self):
|
| 177 |
+
return self.message
|
| 178 |
+
|
| 179 |
+
__str__ = __repr__
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
class NoSectionError(Error):
|
| 183 |
+
"""Raised when no section matches a requested option."""
|
| 184 |
+
|
| 185 |
+
def __init__(self, section):
|
| 186 |
+
Error.__init__(self, 'No section: %r' % (section,))
|
| 187 |
+
self.section = section
|
| 188 |
+
self.args = (section, )
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
class DuplicateSectionError(Error):
|
| 192 |
+
"""Raised when a section is repeated in an input source.
|
| 193 |
+
|
| 194 |
+
Possible repetitions that raise this exception are: multiple creation
|
| 195 |
+
using the API or in strict parsers when a section is found more than once
|
| 196 |
+
in a single input file, string or dictionary.
|
| 197 |
+
"""
|
| 198 |
+
|
| 199 |
+
def __init__(self, section, source=None, lineno=None):
|
| 200 |
+
msg = [repr(section), " already exists"]
|
| 201 |
+
if source is not None:
|
| 202 |
+
message = ["While reading from ", repr(source)]
|
| 203 |
+
if lineno is not None:
|
| 204 |
+
message.append(" [line {0:2d}]".format(lineno))
|
| 205 |
+
message.append(": section ")
|
| 206 |
+
message.extend(msg)
|
| 207 |
+
msg = message
|
| 208 |
+
else:
|
| 209 |
+
msg.insert(0, "Section ")
|
| 210 |
+
Error.__init__(self, "".join(msg))
|
| 211 |
+
self.section = section
|
| 212 |
+
self.source = source
|
| 213 |
+
self.lineno = lineno
|
| 214 |
+
self.args = (section, source, lineno)
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
class DuplicateOptionError(Error):
|
| 218 |
+
"""Raised by strict parsers when an option is repeated in an input source.
|
| 219 |
+
|
| 220 |
+
Current implementation raises this exception only when an option is found
|
| 221 |
+
more than once in a single file, string or dictionary.
|
| 222 |
+
"""
|
| 223 |
+
|
| 224 |
+
def __init__(self, section, option, source=None, lineno=None):
|
| 225 |
+
msg = [repr(option), " in section ", repr(section),
|
| 226 |
+
" already exists"]
|
| 227 |
+
if source is not None:
|
| 228 |
+
message = ["While reading from ", repr(source)]
|
| 229 |
+
if lineno is not None:
|
| 230 |
+
message.append(" [line {0:2d}]".format(lineno))
|
| 231 |
+
message.append(": option ")
|
| 232 |
+
message.extend(msg)
|
| 233 |
+
msg = message
|
| 234 |
+
else:
|
| 235 |
+
msg.insert(0, "Option ")
|
| 236 |
+
Error.__init__(self, "".join(msg))
|
| 237 |
+
self.section = section
|
| 238 |
+
self.option = option
|
| 239 |
+
self.source = source
|
| 240 |
+
self.lineno = lineno
|
| 241 |
+
self.args = (section, option, source, lineno)
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
class NoOptionError(Error):
|
| 245 |
+
"""A requested option was not found."""
|
| 246 |
+
|
| 247 |
+
def __init__(self, option, section):
|
| 248 |
+
Error.__init__(self, "No option %r in section: %r" %
|
| 249 |
+
(option, section))
|
| 250 |
+
self.option = option
|
| 251 |
+
self.section = section
|
| 252 |
+
self.args = (option, section)
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
class InterpolationError(Error):
|
| 256 |
+
"""Base class for interpolation-related exceptions."""
|
| 257 |
+
|
| 258 |
+
def __init__(self, option, section, msg):
|
| 259 |
+
Error.__init__(self, msg)
|
| 260 |
+
self.option = option
|
| 261 |
+
self.section = section
|
| 262 |
+
self.args = (option, section, msg)
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
class InterpolationMissingOptionError(InterpolationError):
|
| 266 |
+
"""A string substitution required a setting which was not available."""
|
| 267 |
+
|
| 268 |
+
def __init__(self, option, section, rawval, reference):
|
| 269 |
+
msg = ("Bad value substitution: option {!r} in section {!r} contains "
|
| 270 |
+
"an interpolation key {!r} which is not a valid option name. "
|
| 271 |
+
"Raw value: {!r}".format(option, section, reference, rawval))
|
| 272 |
+
InterpolationError.__init__(self, option, section, msg)
|
| 273 |
+
self.reference = reference
|
| 274 |
+
self.args = (option, section, rawval, reference)
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
class InterpolationSyntaxError(InterpolationError):
|
| 278 |
+
"""Raised when the source text contains invalid syntax.
|
| 279 |
+
|
| 280 |
+
Current implementation raises this exception when the source text into
|
| 281 |
+
which substitutions are made does not conform to the required syntax.
|
| 282 |
+
"""
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
class InterpolationDepthError(InterpolationError):
|
| 286 |
+
"""Raised when substitutions are nested too deeply."""
|
| 287 |
+
|
| 288 |
+
def __init__(self, option, section, rawval):
|
| 289 |
+
msg = ("Recursion limit exceeded in value substitution: option {!r} "
|
| 290 |
+
"in section {!r} contains an interpolation key which "
|
| 291 |
+
"cannot be substituted in {} steps. Raw value: {!r}"
|
| 292 |
+
"".format(option, section, MAX_INTERPOLATION_DEPTH,
|
| 293 |
+
rawval))
|
| 294 |
+
InterpolationError.__init__(self, option, section, msg)
|
| 295 |
+
self.args = (option, section, rawval)
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
class ParsingError(Error):
|
| 299 |
+
"""Raised when a configuration file does not follow legal syntax."""
|
| 300 |
+
|
| 301 |
+
def __init__(self, source=None, filename=None):
|
| 302 |
+
# Exactly one of `source'/`filename' arguments has to be given.
|
| 303 |
+
# `filename' kept for compatibility.
|
| 304 |
+
if filename and source:
|
| 305 |
+
raise ValueError("Cannot specify both `filename' and `source'. "
|
| 306 |
+
"Use `source'.")
|
| 307 |
+
elif not filename and not source:
|
| 308 |
+
raise ValueError("Required argument `source' not given.")
|
| 309 |
+
elif filename:
|
| 310 |
+
source = filename
|
| 311 |
+
Error.__init__(self, 'Source contains parsing errors: %r' % source)
|
| 312 |
+
self.source = source
|
| 313 |
+
self.errors = []
|
| 314 |
+
self.args = (source, )
|
| 315 |
+
|
| 316 |
+
@property
|
| 317 |
+
def filename(self):
|
| 318 |
+
"""Deprecated, use `source'."""
|
| 319 |
+
warnings.warn(
|
| 320 |
+
"The 'filename' attribute will be removed in Python 3.12. "
|
| 321 |
+
"Use 'source' instead.",
|
| 322 |
+
DeprecationWarning, stacklevel=2
|
| 323 |
+
)
|
| 324 |
+
return self.source
|
| 325 |
+
|
| 326 |
+
@filename.setter
|
| 327 |
+
def filename(self, value):
|
| 328 |
+
"""Deprecated, user `source'."""
|
| 329 |
+
warnings.warn(
|
| 330 |
+
"The 'filename' attribute will be removed in Python 3.12. "
|
| 331 |
+
"Use 'source' instead.",
|
| 332 |
+
DeprecationWarning, stacklevel=2
|
| 333 |
+
)
|
| 334 |
+
self.source = value
|
| 335 |
+
|
| 336 |
+
def append(self, lineno, line):
|
| 337 |
+
self.errors.append((lineno, line))
|
| 338 |
+
self.message += '\n\t[line %2d]: %s' % (lineno, line)
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
class MissingSectionHeaderError(ParsingError):
|
| 342 |
+
"""Raised when a key-value pair is found before any section header."""
|
| 343 |
+
|
| 344 |
+
def __init__(self, filename, lineno, line):
|
| 345 |
+
Error.__init__(
|
| 346 |
+
self,
|
| 347 |
+
'File contains no section headers.\nfile: %r, line: %d\n%r' %
|
| 348 |
+
(filename, lineno, line))
|
| 349 |
+
self.source = filename
|
| 350 |
+
self.lineno = lineno
|
| 351 |
+
self.line = line
|
| 352 |
+
self.args = (filename, lineno, line)
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
# Used in parser getters to indicate the default behaviour when a specific
|
| 356 |
+
# option is not found it to raise an exception. Created to enable `None` as
|
| 357 |
+
# a valid fallback value.
|
| 358 |
+
_UNSET = object()
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
class Interpolation:
|
| 362 |
+
"""Dummy interpolation that passes the value through with no changes."""
|
| 363 |
+
|
| 364 |
+
def before_get(self, parser, section, option, value, defaults):
|
| 365 |
+
return value
|
| 366 |
+
|
| 367 |
+
def before_set(self, parser, section, option, value):
|
| 368 |
+
return value
|
| 369 |
+
|
| 370 |
+
def before_read(self, parser, section, option, value):
|
| 371 |
+
return value
|
| 372 |
+
|
| 373 |
+
def before_write(self, parser, section, option, value):
|
| 374 |
+
return value
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
class BasicInterpolation(Interpolation):
|
| 378 |
+
"""Interpolation as implemented in the classic ConfigParser.
|
| 379 |
+
|
| 380 |
+
The option values can contain format strings which refer to other values in
|
| 381 |
+
the same section, or values in the special default section.
|
| 382 |
+
|
| 383 |
+
For example:
|
| 384 |
+
|
| 385 |
+
something: %(dir)s/whatever
|
| 386 |
+
|
| 387 |
+
would resolve the "%(dir)s" to the value of dir. All reference
|
| 388 |
+
expansions are done late, on demand. If a user needs to use a bare % in
|
| 389 |
+
a configuration file, she can escape it by writing %%. Other % usage
|
| 390 |
+
is considered a user error and raises `InterpolationSyntaxError`."""
|
| 391 |
+
|
| 392 |
+
_KEYCRE = re.compile(r"%\(([^)]+)\)s")
|
| 393 |
+
|
| 394 |
+
def before_get(self, parser, section, option, value, defaults):
|
| 395 |
+
L = []
|
| 396 |
+
self._interpolate_some(parser, option, L, value, section, defaults, 1)
|
| 397 |
+
return ''.join(L)
|
| 398 |
+
|
| 399 |
+
def before_set(self, parser, section, option, value):
|
| 400 |
+
tmp_value = value.replace('%%', '') # escaped percent signs
|
| 401 |
+
tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax
|
| 402 |
+
if '%' in tmp_value:
|
| 403 |
+
raise ValueError("invalid interpolation syntax in %r at "
|
| 404 |
+
"position %d" % (value, tmp_value.find('%')))
|
| 405 |
+
return value
|
| 406 |
+
|
| 407 |
+
def _interpolate_some(self, parser, option, accum, rest, section, map,
|
| 408 |
+
depth):
|
| 409 |
+
rawval = parser.get(section, option, raw=True, fallback=rest)
|
| 410 |
+
if depth > MAX_INTERPOLATION_DEPTH:
|
| 411 |
+
raise InterpolationDepthError(option, section, rawval)
|
| 412 |
+
while rest:
|
| 413 |
+
p = rest.find("%")
|
| 414 |
+
if p < 0:
|
| 415 |
+
accum.append(rest)
|
| 416 |
+
return
|
| 417 |
+
if p > 0:
|
| 418 |
+
accum.append(rest[:p])
|
| 419 |
+
rest = rest[p:]
|
| 420 |
+
# p is no longer used
|
| 421 |
+
c = rest[1:2]
|
| 422 |
+
if c == "%":
|
| 423 |
+
accum.append("%")
|
| 424 |
+
rest = rest[2:]
|
| 425 |
+
elif c == "(":
|
| 426 |
+
m = self._KEYCRE.match(rest)
|
| 427 |
+
if m is None:
|
| 428 |
+
raise InterpolationSyntaxError(option, section,
|
| 429 |
+
"bad interpolation variable reference %r" % rest)
|
| 430 |
+
var = parser.optionxform(m.group(1))
|
| 431 |
+
rest = rest[m.end():]
|
| 432 |
+
try:
|
| 433 |
+
v = map[var]
|
| 434 |
+
except KeyError:
|
| 435 |
+
raise InterpolationMissingOptionError(
|
| 436 |
+
option, section, rawval, var) from None
|
| 437 |
+
if "%" in v:
|
| 438 |
+
self._interpolate_some(parser, option, accum, v,
|
| 439 |
+
section, map, depth + 1)
|
| 440 |
+
else:
|
| 441 |
+
accum.append(v)
|
| 442 |
+
else:
|
| 443 |
+
raise InterpolationSyntaxError(
|
| 444 |
+
option, section,
|
| 445 |
+
"'%%' must be followed by '%%' or '(', "
|
| 446 |
+
"found: %r" % (rest,))
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
class ExtendedInterpolation(Interpolation):
    """Advanced variant of interpolation, supports the syntax used by
    `zc.buildout`. Enables interpolation between sections."""

    # Matches ${name} or ${section:name} references.
    _KEYCRE = re.compile(r"\$\{([^}]+)\}")

    def before_get(self, parser, section, option, value, defaults):
        """Expand all ``${...}`` references in *value* and return the result."""
        L = []
        self._interpolate_some(parser, option, L, value, section, defaults, 1)
        return ''.join(L)

    def before_set(self, parser, section, option, value):
        """Validate interpolation syntax; raise ValueError on a stray '$'."""
        tmp_value = value.replace('$$', '') # escaped dollar signs
        tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax
        if '$' in tmp_value:
            raise ValueError("invalid interpolation syntax in %r at "
                             "position %d" % (value, tmp_value.find('$')))
        return value

    def _interpolate_some(self, parser, option, accum, rest, section, map,
                          depth):
        """Recursively expand ``${opt}`` / ``${sect:opt}`` references in
        *rest*, appending the expanded pieces to *accum*."""
        rawval = parser.get(section, option, raw=True, fallback=rest)
        if depth > MAX_INTERPOLATION_DEPTH:
            raise InterpolationDepthError(option, section, rawval)
        while rest:
            p = rest.find("$")
            if p < 0:
                accum.append(rest)
                return
            if p > 0:
                accum.append(rest[:p])
                rest = rest[p:]
            # rest now starts with '$'; look at the next character
            c = rest[1:2]
            if c == "$":
                accum.append("$")
                rest = rest[2:]
            elif c == "{":
                m = self._KEYCRE.match(rest)
                if m is None:
                    raise InterpolationSyntaxError(option, section,
                        "bad interpolation variable reference %r" % rest)
                path = m.group(1).split(':')
                rest = rest[m.end():]
                sect = section
                opt = option
                try:
                    if len(path) == 1:
                        # ${opt}: look up in the current map
                        opt = parser.optionxform(path[0])
                        v = map[opt]
                    elif len(path) == 2:
                        # ${section:opt}: cross-section lookup (raw)
                        sect = path[0]
                        opt = parser.optionxform(path[1])
                        v = parser.get(sect, opt, raw=True)
                    else:
                        raise InterpolationSyntaxError(
                            option, section,
                            "More than one ':' found: %r" % (rest,))
                except (KeyError, NoSectionError, NoOptionError):
                    raise InterpolationMissingOptionError(
                        option, section, rawval, ":".join(path)) from None
                if "$" in v:
                    self._interpolate_some(parser, opt, accum, v, sect,
                                           dict(parser.items(sect, raw=True)),
                                           depth + 1)
                else:
                    accum.append(v)
            else:
                raise InterpolationSyntaxError(
                    option, section,
                    "'$' must be followed by '$' or '{', "
                    "found: %r" % (rest,))
class LegacyInterpolation(Interpolation):
    """Deprecated interpolation used in old versions of ConfigParser.
    Use BasicInterpolation or ExtendedInterpolation instead."""

    # Matches %(name)s references; the '|.' alternative lets sub() walk
    # over every other character untouched.
    _KEYCRE = re.compile(r"%\(([^)]*)\)s|.")

    def before_get(self, parser, section, option, value, vars):
        """Iteratively apply ``%(name)s`` substitution until no references
        remain or the depth limit is hit."""
        rawval = value
        depth = MAX_INTERPOLATION_DEPTH
        while depth:                    # Loop through this until it's done
            depth -= 1
            if value and "%(" in value:
                # normalize option names inside references first
                replace = functools.partial(self._interpolation_replace,
                                            parser=parser)
                value = self._KEYCRE.sub(replace, value)
                try:
                    value = value % vars
                except KeyError as e:
                    raise InterpolationMissingOptionError(
                        option, section, rawval, e.args[0]) from None
            else:
                break
        if value and "%(" in value:
            raise InterpolationDepthError(option, section, rawval)
        return value

    def before_set(self, parser, section, option, value):
        """No validation in the legacy scheme; store the value as given."""
        return value

    @staticmethod
    def _interpolation_replace(match, parser):
        # Rewrite %(Name)s to %(name)s using the parser's optionxform;
        # leave every other character unchanged.
        s = match.group(1)
        if s is None:
            return match.group()
        else:
            return "%%(%s)s" % parser.optionxform(s)
class RawConfigParser(MutableMapping):
    """ConfigParser that does not do interpolation."""

    # Regular expressions for parsing section headers and options
    _SECT_TMPL = r"""
        \[                                 # [
        (?P<header>.+)                     # very permissive!
        \]                                 # ]
        """
    _OPT_TMPL = r"""
        (?P<option>.*?)                    # very permissive!
        \s*(?P<vi>{delim})\s*              # any number of space/tab,
                                           # followed by any of the
                                           # allowed delimiters,
                                           # followed by any space/tab
        (?P<value>.*)$                     # everything up to eol
        """
    _OPT_NV_TMPL = r"""
        (?P<option>.*?)                    # very permissive!
        \s*(?:                             # any number of space/tab,
        (?P<vi>{delim})\s*                 # optionally followed by
                                           # any of the allowed
                                           # delimiters, followed by any
                                           # space/tab
        (?P<value>.*))?$                   # everything up to eol
        """
    # Interpolation algorithm to be used if the user does not specify another
    _DEFAULT_INTERPOLATION = Interpolation()
    # Compiled regular expression for matching sections
    SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
    # Compiled regular expression for matching options with typical separators
    OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
    # Compiled regular expression for matching options with optional values
    # delimited using typical separators
    OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
    # Compiled regular expression for matching leading whitespace in a line
    NONSPACECRE = re.compile(r"\S")
    # Possible boolean values in the configuration.
    BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True, 'on': True,
                      '0': False, 'no': False, 'false': False, 'off': False}

    def __init__(self, defaults=None, dict_type=_default_dict,
                 allow_no_value=False, *, delimiters=('=', ':'),
                 comment_prefixes=('#', ';'), inline_comment_prefixes=None,
                 strict=True, empty_lines_in_values=True,
                 default_section=DEFAULTSECT,
                 interpolation=_UNSET, converters=_UNSET):

        self._dict = dict_type
        self._sections = self._dict()
        self._defaults = self._dict()
        self._converters = ConverterMapping(self)
        self._proxies = self._dict()
        self._proxies[default_section] = SectionProxy(self, default_section)
        self._delimiters = tuple(delimiters)
        if delimiters == ('=', ':'):
            # fast path: use the precompiled class-level patterns
            self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
        else:
            d = "|".join(re.escape(d) for d in delimiters)
            if allow_no_value:
                self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
                                          re.VERBOSE)
            else:
                self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
                                          re.VERBOSE)
        self._comment_prefixes = tuple(comment_prefixes or ())
        self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
        self._strict = strict
        self._allow_no_value = allow_no_value
        self._empty_lines_in_values = empty_lines_in_values
        self.default_section = default_section
        self._interpolation = interpolation
        if self._interpolation is _UNSET:
            self._interpolation = self._DEFAULT_INTERPOLATION
        if self._interpolation is None:
            # explicit None means "no interpolation at all"
            self._interpolation = Interpolation()
        if converters is not _UNSET:
            self._converters.update(converters)
        if defaults:
            self._read_defaults(defaults)
def defaults(self):
|
| 643 |
+
return self._defaults
|
| 644 |
+
|
| 645 |
+
def sections(self):
|
| 646 |
+
"""Return a list of section names, excluding [DEFAULT]"""
|
| 647 |
+
# self._sections will never have [DEFAULT] in it
|
| 648 |
+
return list(self._sections.keys())
|
| 649 |
+
|
| 650 |
+
def add_section(self, section):
|
| 651 |
+
"""Create a new section in the configuration.
|
| 652 |
+
|
| 653 |
+
Raise DuplicateSectionError if a section by the specified name
|
| 654 |
+
already exists. Raise ValueError if name is DEFAULT.
|
| 655 |
+
"""
|
| 656 |
+
if section == self.default_section:
|
| 657 |
+
raise ValueError('Invalid section name: %r' % section)
|
| 658 |
+
|
| 659 |
+
if section in self._sections:
|
| 660 |
+
raise DuplicateSectionError(section)
|
| 661 |
+
self._sections[section] = self._dict()
|
| 662 |
+
self._proxies[section] = SectionProxy(self, section)
|
| 663 |
+
|
| 664 |
+
def has_section(self, section):
|
| 665 |
+
"""Indicate whether the named section is present in the configuration.
|
| 666 |
+
|
| 667 |
+
The DEFAULT section is not acknowledged.
|
| 668 |
+
"""
|
| 669 |
+
return section in self._sections
|
| 670 |
+
|
| 671 |
+
def options(self, section):
|
| 672 |
+
"""Return a list of option names for the given section name."""
|
| 673 |
+
try:
|
| 674 |
+
opts = self._sections[section].copy()
|
| 675 |
+
except KeyError:
|
| 676 |
+
raise NoSectionError(section) from None
|
| 677 |
+
opts.update(self._defaults)
|
| 678 |
+
return list(opts.keys())
|
| 679 |
+
|
| 680 |
+
def read(self, filenames, encoding=None):
|
| 681 |
+
"""Read and parse a filename or an iterable of filenames.
|
| 682 |
+
|
| 683 |
+
Files that cannot be opened are silently ignored; this is
|
| 684 |
+
designed so that you can specify an iterable of potential
|
| 685 |
+
configuration file locations (e.g. current directory, user's
|
| 686 |
+
home directory, systemwide directory), and all existing
|
| 687 |
+
configuration files in the iterable will be read. A single
|
| 688 |
+
filename may also be given.
|
| 689 |
+
|
| 690 |
+
Return list of successfully read files.
|
| 691 |
+
"""
|
| 692 |
+
if isinstance(filenames, (str, bytes, os.PathLike)):
|
| 693 |
+
filenames = [filenames]
|
| 694 |
+
encoding = io.text_encoding(encoding)
|
| 695 |
+
read_ok = []
|
| 696 |
+
for filename in filenames:
|
| 697 |
+
try:
|
| 698 |
+
with open(filename, encoding=encoding) as fp:
|
| 699 |
+
self._read(fp, filename)
|
| 700 |
+
except OSError:
|
| 701 |
+
continue
|
| 702 |
+
if isinstance(filename, os.PathLike):
|
| 703 |
+
filename = os.fspath(filename)
|
| 704 |
+
read_ok.append(filename)
|
| 705 |
+
return read_ok
|
| 706 |
+
|
| 707 |
+
def read_file(self, f, source=None):
|
| 708 |
+
"""Like read() but the argument must be a file-like object.
|
| 709 |
+
|
| 710 |
+
The `f` argument must be iterable, returning one line at a time.
|
| 711 |
+
Optional second argument is the `source` specifying the name of the
|
| 712 |
+
file being read. If not given, it is taken from f.name. If `f` has no
|
| 713 |
+
`name` attribute, `<???>` is used.
|
| 714 |
+
"""
|
| 715 |
+
if source is None:
|
| 716 |
+
try:
|
| 717 |
+
source = f.name
|
| 718 |
+
except AttributeError:
|
| 719 |
+
source = '<???>'
|
| 720 |
+
self._read(f, source)
|
| 721 |
+
|
| 722 |
+
def read_string(self, string, source='<string>'):
|
| 723 |
+
"""Read configuration from a given string."""
|
| 724 |
+
sfile = io.StringIO(string)
|
| 725 |
+
self.read_file(sfile, source)
|
| 726 |
+
|
| 727 |
+
def read_dict(self, dictionary, source='<dict>'):
|
| 728 |
+
"""Read configuration from a dictionary.
|
| 729 |
+
|
| 730 |
+
Keys are section names, values are dictionaries with keys and values
|
| 731 |
+
that should be present in the section. If the used dictionary type
|
| 732 |
+
preserves order, sections and their keys will be added in order.
|
| 733 |
+
|
| 734 |
+
All types held in the dictionary are converted to strings during
|
| 735 |
+
reading, including section names, option names and keys.
|
| 736 |
+
|
| 737 |
+
Optional second argument is the `source` specifying the name of the
|
| 738 |
+
dictionary being read.
|
| 739 |
+
"""
|
| 740 |
+
elements_added = set()
|
| 741 |
+
for section, keys in dictionary.items():
|
| 742 |
+
section = str(section)
|
| 743 |
+
try:
|
| 744 |
+
self.add_section(section)
|
| 745 |
+
except (DuplicateSectionError, ValueError):
|
| 746 |
+
if self._strict and section in elements_added:
|
| 747 |
+
raise
|
| 748 |
+
elements_added.add(section)
|
| 749 |
+
for key, value in keys.items():
|
| 750 |
+
key = self.optionxform(str(key))
|
| 751 |
+
if value is not None:
|
| 752 |
+
value = str(value)
|
| 753 |
+
if self._strict and (section, key) in elements_added:
|
| 754 |
+
raise DuplicateOptionError(section, key, source)
|
| 755 |
+
elements_added.add((section, key))
|
| 756 |
+
self.set(section, key, value)
|
| 757 |
+
|
| 758 |
+
def readfp(self, fp, filename=None):
|
| 759 |
+
"""Deprecated, use read_file instead."""
|
| 760 |
+
warnings.warn(
|
| 761 |
+
"This method will be removed in Python 3.12. "
|
| 762 |
+
"Use 'parser.read_file()' instead.",
|
| 763 |
+
DeprecationWarning, stacklevel=2
|
| 764 |
+
)
|
| 765 |
+
self.read_file(fp, source=filename)
|
| 766 |
+
|
| 767 |
+
def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET):
|
| 768 |
+
"""Get an option value for a given section.
|
| 769 |
+
|
| 770 |
+
If `vars` is provided, it must be a dictionary. The option is looked up
|
| 771 |
+
in `vars` (if provided), `section`, and in `DEFAULTSECT` in that order.
|
| 772 |
+
If the key is not found and `fallback` is provided, it is used as
|
| 773 |
+
a fallback value. `None` can be provided as a `fallback` value.
|
| 774 |
+
|
| 775 |
+
If interpolation is enabled and the optional argument `raw` is False,
|
| 776 |
+
all interpolations are expanded in the return values.
|
| 777 |
+
|
| 778 |
+
Arguments `raw`, `vars`, and `fallback` are keyword only.
|
| 779 |
+
|
| 780 |
+
The section DEFAULT is special.
|
| 781 |
+
"""
|
| 782 |
+
try:
|
| 783 |
+
d = self._unify_values(section, vars)
|
| 784 |
+
except NoSectionError:
|
| 785 |
+
if fallback is _UNSET:
|
| 786 |
+
raise
|
| 787 |
+
else:
|
| 788 |
+
return fallback
|
| 789 |
+
option = self.optionxform(option)
|
| 790 |
+
try:
|
| 791 |
+
value = d[option]
|
| 792 |
+
except KeyError:
|
| 793 |
+
if fallback is _UNSET:
|
| 794 |
+
raise NoOptionError(option, section)
|
| 795 |
+
else:
|
| 796 |
+
return fallback
|
| 797 |
+
|
| 798 |
+
if raw or value is None:
|
| 799 |
+
return value
|
| 800 |
+
else:
|
| 801 |
+
return self._interpolation.before_get(self, section, option, value,
|
| 802 |
+
d)
|
| 803 |
+
|
| 804 |
+
def _get(self, section, conv, option, **kwargs):
|
| 805 |
+
return conv(self.get(section, option, **kwargs))
|
| 806 |
+
|
| 807 |
+
def _get_conv(self, section, option, conv, *, raw=False, vars=None,
|
| 808 |
+
fallback=_UNSET, **kwargs):
|
| 809 |
+
try:
|
| 810 |
+
return self._get(section, conv, option, raw=raw, vars=vars,
|
| 811 |
+
**kwargs)
|
| 812 |
+
except (NoSectionError, NoOptionError):
|
| 813 |
+
if fallback is _UNSET:
|
| 814 |
+
raise
|
| 815 |
+
return fallback
|
| 816 |
+
|
| 817 |
+
# getint, getfloat and getboolean provided directly for backwards compat
|
| 818 |
+
def getint(self, section, option, *, raw=False, vars=None,
|
| 819 |
+
fallback=_UNSET, **kwargs):
|
| 820 |
+
return self._get_conv(section, option, int, raw=raw, vars=vars,
|
| 821 |
+
fallback=fallback, **kwargs)
|
| 822 |
+
|
| 823 |
+
def getfloat(self, section, option, *, raw=False, vars=None,
|
| 824 |
+
fallback=_UNSET, **kwargs):
|
| 825 |
+
return self._get_conv(section, option, float, raw=raw, vars=vars,
|
| 826 |
+
fallback=fallback, **kwargs)
|
| 827 |
+
|
| 828 |
+
def getboolean(self, section, option, *, raw=False, vars=None,
|
| 829 |
+
fallback=_UNSET, **kwargs):
|
| 830 |
+
return self._get_conv(section, option, self._convert_to_boolean,
|
| 831 |
+
raw=raw, vars=vars, fallback=fallback, **kwargs)
|
| 832 |
+
|
| 833 |
+
def items(self, section=_UNSET, raw=False, vars=None):
|
| 834 |
+
"""Return a list of (name, value) tuples for each option in a section.
|
| 835 |
+
|
| 836 |
+
All % interpolations are expanded in the return values, based on the
|
| 837 |
+
defaults passed into the constructor, unless the optional argument
|
| 838 |
+
`raw` is true. Additional substitutions may be provided using the
|
| 839 |
+
`vars` argument, which must be a dictionary whose contents overrides
|
| 840 |
+
any pre-existing defaults.
|
| 841 |
+
|
| 842 |
+
The section DEFAULT is special.
|
| 843 |
+
"""
|
| 844 |
+
if section is _UNSET:
|
| 845 |
+
return super().items()
|
| 846 |
+
d = self._defaults.copy()
|
| 847 |
+
try:
|
| 848 |
+
d.update(self._sections[section])
|
| 849 |
+
except KeyError:
|
| 850 |
+
if section != self.default_section:
|
| 851 |
+
raise NoSectionError(section)
|
| 852 |
+
orig_keys = list(d.keys())
|
| 853 |
+
# Update with the entry specific variables
|
| 854 |
+
if vars:
|
| 855 |
+
for key, value in vars.items():
|
| 856 |
+
d[self.optionxform(key)] = value
|
| 857 |
+
value_getter = lambda option: self._interpolation.before_get(self,
|
| 858 |
+
section, option, d[option], d)
|
| 859 |
+
if raw:
|
| 860 |
+
value_getter = lambda option: d[option]
|
| 861 |
+
return [(option, value_getter(option)) for option in orig_keys]
|
| 862 |
+
|
| 863 |
+
def popitem(self):
|
| 864 |
+
"""Remove a section from the parser and return it as
|
| 865 |
+
a (section_name, section_proxy) tuple. If no section is present, raise
|
| 866 |
+
KeyError.
|
| 867 |
+
|
| 868 |
+
The section DEFAULT is never returned because it cannot be removed.
|
| 869 |
+
"""
|
| 870 |
+
for key in self.sections():
|
| 871 |
+
value = self[key]
|
| 872 |
+
del self[key]
|
| 873 |
+
return key, value
|
| 874 |
+
raise KeyError
|
| 875 |
+
|
| 876 |
+
def optionxform(self, optionstr):
|
| 877 |
+
return optionstr.lower()
|
| 878 |
+
|
| 879 |
+
def has_option(self, section, option):
|
| 880 |
+
"""Check for the existence of a given option in a given section.
|
| 881 |
+
If the specified `section` is None or an empty string, DEFAULT is
|
| 882 |
+
assumed. If the specified `section` does not exist, returns False."""
|
| 883 |
+
if not section or section == self.default_section:
|
| 884 |
+
option = self.optionxform(option)
|
| 885 |
+
return option in self._defaults
|
| 886 |
+
elif section not in self._sections:
|
| 887 |
+
return False
|
| 888 |
+
else:
|
| 889 |
+
option = self.optionxform(option)
|
| 890 |
+
return (option in self._sections[section]
|
| 891 |
+
or option in self._defaults)
|
| 892 |
+
|
| 893 |
+
def set(self, section, option, value=None):
|
| 894 |
+
"""Set an option."""
|
| 895 |
+
if value:
|
| 896 |
+
value = self._interpolation.before_set(self, section, option,
|
| 897 |
+
value)
|
| 898 |
+
if not section or section == self.default_section:
|
| 899 |
+
sectdict = self._defaults
|
| 900 |
+
else:
|
| 901 |
+
try:
|
| 902 |
+
sectdict = self._sections[section]
|
| 903 |
+
except KeyError:
|
| 904 |
+
raise NoSectionError(section) from None
|
| 905 |
+
sectdict[self.optionxform(option)] = value
|
| 906 |
+
|
| 907 |
+
def write(self, fp, space_around_delimiters=True):
|
| 908 |
+
"""Write an .ini-format representation of the configuration state.
|
| 909 |
+
|
| 910 |
+
If `space_around_delimiters` is True (the default), delimiters
|
| 911 |
+
between keys and values are surrounded by spaces.
|
| 912 |
+
|
| 913 |
+
Please note that comments in the original configuration file are not
|
| 914 |
+
preserved when writing the configuration back.
|
| 915 |
+
"""
|
| 916 |
+
if space_around_delimiters:
|
| 917 |
+
d = " {} ".format(self._delimiters[0])
|
| 918 |
+
else:
|
| 919 |
+
d = self._delimiters[0]
|
| 920 |
+
if self._defaults:
|
| 921 |
+
self._write_section(fp, self.default_section,
|
| 922 |
+
self._defaults.items(), d)
|
| 923 |
+
for section in self._sections:
|
| 924 |
+
self._write_section(fp, section,
|
| 925 |
+
self._sections[section].items(), d)
|
| 926 |
+
|
| 927 |
+
def _write_section(self, fp, section_name, section_items, delimiter):
|
| 928 |
+
"""Write a single section to the specified `fp`."""
|
| 929 |
+
fp.write("[{}]\n".format(section_name))
|
| 930 |
+
for key, value in section_items:
|
| 931 |
+
value = self._interpolation.before_write(self, section_name, key,
|
| 932 |
+
value)
|
| 933 |
+
if value is not None or not self._allow_no_value:
|
| 934 |
+
value = delimiter + str(value).replace('\n', '\n\t')
|
| 935 |
+
else:
|
| 936 |
+
value = ""
|
| 937 |
+
fp.write("{}{}\n".format(key, value))
|
| 938 |
+
fp.write("\n")
|
| 939 |
+
|
| 940 |
+
def remove_option(self, section, option):
|
| 941 |
+
"""Remove an option."""
|
| 942 |
+
if not section or section == self.default_section:
|
| 943 |
+
sectdict = self._defaults
|
| 944 |
+
else:
|
| 945 |
+
try:
|
| 946 |
+
sectdict = self._sections[section]
|
| 947 |
+
except KeyError:
|
| 948 |
+
raise NoSectionError(section) from None
|
| 949 |
+
option = self.optionxform(option)
|
| 950 |
+
existed = option in sectdict
|
| 951 |
+
if existed:
|
| 952 |
+
del sectdict[option]
|
| 953 |
+
return existed
|
| 954 |
+
|
| 955 |
+
def remove_section(self, section):
|
| 956 |
+
"""Remove a file section."""
|
| 957 |
+
existed = section in self._sections
|
| 958 |
+
if existed:
|
| 959 |
+
del self._sections[section]
|
| 960 |
+
del self._proxies[section]
|
| 961 |
+
return existed
|
| 962 |
+
|
| 963 |
+
def __getitem__(self, key):
|
| 964 |
+
if key != self.default_section and not self.has_section(key):
|
| 965 |
+
raise KeyError(key)
|
| 966 |
+
return self._proxies[key]
|
| 967 |
+
|
| 968 |
+
def __setitem__(self, key, value):
|
| 969 |
+
# To conform with the mapping protocol, overwrites existing values in
|
| 970 |
+
# the section.
|
| 971 |
+
if key in self and self[key] is value:
|
| 972 |
+
return
|
| 973 |
+
# XXX this is not atomic if read_dict fails at any point. Then again,
|
| 974 |
+
# no update method in configparser is atomic in this implementation.
|
| 975 |
+
if key == self.default_section:
|
| 976 |
+
self._defaults.clear()
|
| 977 |
+
elif key in self._sections:
|
| 978 |
+
self._sections[key].clear()
|
| 979 |
+
self.read_dict({key: value})
|
| 980 |
+
|
| 981 |
+
def __delitem__(self, key):
|
| 982 |
+
if key == self.default_section:
|
| 983 |
+
raise ValueError("Cannot remove the default section.")
|
| 984 |
+
if not self.has_section(key):
|
| 985 |
+
raise KeyError(key)
|
| 986 |
+
self.remove_section(key)
|
| 987 |
+
|
| 988 |
+
def __contains__(self, key):
|
| 989 |
+
return key == self.default_section or self.has_section(key)
|
| 990 |
+
|
| 991 |
+
def __len__(self):
|
| 992 |
+
return len(self._sections) + 1 # the default section
|
| 993 |
+
|
| 994 |
+
def __iter__(self):
|
| 995 |
+
# XXX does it break when underlying container state changed?
|
| 996 |
+
return itertools.chain((self.default_section,), self._sections.keys())
|
| 997 |
+
|
| 998 |
+
def _read(self, fp, fpname):
|
| 999 |
+
"""Parse a sectioned configuration file.
|
| 1000 |
+
|
| 1001 |
+
Each section in a configuration file contains a header, indicated by
|
| 1002 |
+
a name in square brackets (`[]`), plus key/value options, indicated by
|
| 1003 |
+
`name` and `value` delimited with a specific substring (`=` or `:` by
|
| 1004 |
+
default).
|
| 1005 |
+
|
| 1006 |
+
Values can span multiple lines, as long as they are indented deeper
|
| 1007 |
+
than the first line of the value. Depending on the parser's mode, blank
|
| 1008 |
+
lines may be treated as parts of multiline values or ignored.
|
| 1009 |
+
|
| 1010 |
+
Configuration files may include comments, prefixed by specific
|
| 1011 |
+
characters (`#` and `;` by default). Comments may appear on their own
|
| 1012 |
+
in an otherwise empty line or may be entered in lines holding values or
|
| 1013 |
+
section names. Please note that comments get stripped off when reading configuration files.
|
| 1014 |
+
"""
|
| 1015 |
+
elements_added = set()
|
| 1016 |
+
cursect = None # None, or a dictionary
|
| 1017 |
+
sectname = None
|
| 1018 |
+
optname = None
|
| 1019 |
+
lineno = 0
|
| 1020 |
+
indent_level = 0
|
| 1021 |
+
e = None # None, or an exception
|
| 1022 |
+
for lineno, line in enumerate(fp, start=1):
|
| 1023 |
+
comment_start = sys.maxsize
|
| 1024 |
+
# strip inline comments
|
| 1025 |
+
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
|
| 1026 |
+
while comment_start == sys.maxsize and inline_prefixes:
|
| 1027 |
+
next_prefixes = {}
|
| 1028 |
+
for prefix, index in inline_prefixes.items():
|
| 1029 |
+
index = line.find(prefix, index+1)
|
| 1030 |
+
if index == -1:
|
| 1031 |
+
continue
|
| 1032 |
+
next_prefixes[prefix] = index
|
| 1033 |
+
if index == 0 or (index > 0 and line[index-1].isspace()):
|
| 1034 |
+
comment_start = min(comment_start, index)
|
| 1035 |
+
inline_prefixes = next_prefixes
|
| 1036 |
+
# strip full line comments
|
| 1037 |
+
for prefix in self._comment_prefixes:
|
| 1038 |
+
if line.strip().startswith(prefix):
|
| 1039 |
+
comment_start = 0
|
| 1040 |
+
break
|
| 1041 |
+
if comment_start == sys.maxsize:
|
| 1042 |
+
comment_start = None
|
| 1043 |
+
value = line[:comment_start].strip()
|
| 1044 |
+
if not value:
|
| 1045 |
+
if self._empty_lines_in_values:
|
| 1046 |
+
# add empty line to the value, but only if there was no
|
| 1047 |
+
# comment on the line
|
| 1048 |
+
if (comment_start is None and
|
| 1049 |
+
cursect is not None and
|
| 1050 |
+
optname and
|
| 1051 |
+
cursect[optname] is not None):
|
| 1052 |
+
cursect[optname].append('') # newlines added at join
|
| 1053 |
+
else:
|
| 1054 |
+
# empty line marks end of value
|
| 1055 |
+
indent_level = sys.maxsize
|
| 1056 |
+
continue
|
| 1057 |
+
# continuation line?
|
| 1058 |
+
first_nonspace = self.NONSPACECRE.search(line)
|
| 1059 |
+
cur_indent_level = first_nonspace.start() if first_nonspace else 0
|
| 1060 |
+
if (cursect is not None and optname and
|
| 1061 |
+
cur_indent_level > indent_level):
|
| 1062 |
+
cursect[optname].append(value)
|
| 1063 |
+
# a section header or option header?
|
| 1064 |
+
else:
|
| 1065 |
+
indent_level = cur_indent_level
|
| 1066 |
+
# is it a section header?
|
| 1067 |
+
mo = self.SECTCRE.match(value)
|
| 1068 |
+
if mo:
|
| 1069 |
+
sectname = mo.group('header')
|
| 1070 |
+
if sectname in self._sections:
|
| 1071 |
+
if self._strict and sectname in elements_added:
|
| 1072 |
+
raise DuplicateSectionError(sectname, fpname,
|
| 1073 |
+
lineno)
|
| 1074 |
+
cursect = self._sections[sectname]
|
| 1075 |
+
elements_added.add(sectname)
|
| 1076 |
+
elif sectname == self.default_section:
|
| 1077 |
+
cursect = self._defaults
|
| 1078 |
+
else:
|
| 1079 |
+
cursect = self._dict()
|
| 1080 |
+
self._sections[sectname] = cursect
|
| 1081 |
+
self._proxies[sectname] = SectionProxy(self, sectname)
|
| 1082 |
+
elements_added.add(sectname)
|
| 1083 |
+
# So sections can't start with a continuation line
|
| 1084 |
+
optname = None
|
| 1085 |
+
# no section header in the file?
|
| 1086 |
+
elif cursect is None:
|
| 1087 |
+
raise MissingSectionHeaderError(fpname, lineno, line)
|
| 1088 |
+
# an option line?
|
| 1089 |
+
else:
|
| 1090 |
+
mo = self._optcre.match(value)
|
| 1091 |
+
if mo:
|
| 1092 |
+
optname, vi, optval = mo.group('option', 'vi', 'value')
|
| 1093 |
+
if not optname:
|
| 1094 |
+
e = self._handle_error(e, fpname, lineno, line)
|
| 1095 |
+
optname = self.optionxform(optname.rstrip())
|
| 1096 |
+
if (self._strict and
|
| 1097 |
+
(sectname, optname) in elements_added):
|
| 1098 |
+
raise DuplicateOptionError(sectname, optname,
|
| 1099 |
+
fpname, lineno)
|
| 1100 |
+
elements_added.add((sectname, optname))
|
| 1101 |
+
# This check is fine because the OPTCRE cannot
|
| 1102 |
+
# match if it would set optval to None
|
| 1103 |
+
if optval is not None:
|
| 1104 |
+
optval = optval.strip()
|
| 1105 |
+
cursect[optname] = [optval]
|
| 1106 |
+
else:
|
| 1107 |
+
# valueless option handling
|
| 1108 |
+
cursect[optname] = None
|
| 1109 |
+
else:
|
| 1110 |
+
# a non-fatal parsing error occurred. set up the
|
| 1111 |
+
# exception but keep going. the exception will be
|
| 1112 |
+
# raised at the end of the file and will contain a
|
| 1113 |
+
# list of all bogus lines
|
| 1114 |
+
e = self._handle_error(e, fpname, lineno, line)
|
| 1115 |
+
self._join_multiline_values()
|
| 1116 |
+
# if any parsing errors occurred, raise an exception
|
| 1117 |
+
if e:
|
| 1118 |
+
raise e
|
| 1119 |
+
|
| 1120 |
+
def _join_multiline_values(self):
|
| 1121 |
+
defaults = self.default_section, self._defaults
|
| 1122 |
+
all_sections = itertools.chain((defaults,),
|
| 1123 |
+
self._sections.items())
|
| 1124 |
+
for section, options in all_sections:
|
| 1125 |
+
for name, val in options.items():
|
| 1126 |
+
if isinstance(val, list):
|
| 1127 |
+
val = '\n'.join(val).rstrip()
|
| 1128 |
+
options[name] = self._interpolation.before_read(self,
|
| 1129 |
+
section,
|
| 1130 |
+
name, val)
|
| 1131 |
+
|
| 1132 |
+
def _read_defaults(self, defaults):
|
| 1133 |
+
"""Read the defaults passed in the initializer.
|
| 1134 |
+
Note: values can be non-string."""
|
| 1135 |
+
for key, value in defaults.items():
|
| 1136 |
+
self._defaults[self.optionxform(key)] = value
|
| 1137 |
+
|
| 1138 |
+
def _handle_error(self, exc, fpname, lineno, line):
|
| 1139 |
+
if not exc:
|
| 1140 |
+
exc = ParsingError(fpname)
|
| 1141 |
+
exc.append(lineno, repr(line))
|
| 1142 |
+
return exc
|
| 1143 |
+
|
| 1144 |
+
def _unify_values(self, section, vars):
|
| 1145 |
+
"""Create a sequence of lookups with 'vars' taking priority over
|
| 1146 |
+
the 'section' which takes priority over the DEFAULTSECT.
|
| 1147 |
+
|
| 1148 |
+
"""
|
| 1149 |
+
sectiondict = {}
|
| 1150 |
+
try:
|
| 1151 |
+
sectiondict = self._sections[section]
|
| 1152 |
+
except KeyError:
|
| 1153 |
+
if section != self.default_section:
|
| 1154 |
+
raise NoSectionError(section) from None
|
| 1155 |
+
# Update with the entry specific variables
|
| 1156 |
+
vardict = {}
|
| 1157 |
+
if vars:
|
| 1158 |
+
for key, value in vars.items():
|
| 1159 |
+
if value is not None:
|
| 1160 |
+
value = str(value)
|
| 1161 |
+
vardict[self.optionxform(key)] = value
|
| 1162 |
+
return _ChainMap(vardict, sectiondict, self._defaults)
|
| 1163 |
+
|
| 1164 |
+
def _convert_to_boolean(self, value):
|
| 1165 |
+
"""Return a boolean value translating from other types if necessary.
|
| 1166 |
+
"""
|
| 1167 |
+
if value.lower() not in self.BOOLEAN_STATES:
|
| 1168 |
+
raise ValueError('Not a boolean: %s' % value)
|
| 1169 |
+
return self.BOOLEAN_STATES[value.lower()]
|
| 1170 |
+
|
| 1171 |
+
def _validate_value_types(self, *, section="", option="", value=""):
|
| 1172 |
+
"""Raises a TypeError for non-string values.
|
| 1173 |
+
|
| 1174 |
+
The only legal non-string value if we allow valueless
|
| 1175 |
+
options is None, so we need to check if the value is a
|
| 1176 |
+
string if:
|
| 1177 |
+
- we do not allow valueless options, or
|
| 1178 |
+
- we allow valueless options but the value is not None
|
| 1179 |
+
|
| 1180 |
+
For compatibility reasons this method is not used in classic set()
|
| 1181 |
+
for RawConfigParsers. It is invoked in every case for mapping protocol
|
| 1182 |
+
access and in ConfigParser.set().
|
| 1183 |
+
"""
|
| 1184 |
+
if not isinstance(section, str):
|
| 1185 |
+
raise TypeError("section names must be strings")
|
| 1186 |
+
if not isinstance(option, str):
|
| 1187 |
+
raise TypeError("option keys must be strings")
|
| 1188 |
+
if not self._allow_no_value or value:
|
| 1189 |
+
if not isinstance(value, str):
|
| 1190 |
+
raise TypeError("option values must be strings")
|
| 1191 |
+
|
| 1192 |
+
@property
|
| 1193 |
+
def converters(self):
|
| 1194 |
+
return self._converters
|
| 1195 |
+
|
| 1196 |
+
|
| 1197 |
+
class ConfigParser(RawConfigParser):
    """ConfigParser implementing interpolation."""

    _DEFAULT_INTERPOLATION = BasicInterpolation()

    def set(self, section, option, value=None):
        """Set an option.  Extends RawConfigParser.set by validating type and
        interpolation syntax on the value."""
        self._validate_value_types(option=option, value=value)
        super().set(section, option, value)

    def add_section(self, section):
        """Create a new section in the configuration.  Extends
        RawConfigParser.add_section by validating if the section name is
        a string."""
        self._validate_value_types(section=section)
        super().add_section(section)

    def _read_defaults(self, defaults):
        """Reads the defaults passed in the initializer, implicitly converting
        values to strings like the rest of the API.

        Does not perform interpolation for backwards compatibility.
        """
        try:
            hold_interpolation = self._interpolation
            # temporarily disable interpolation while ingesting defaults
            self._interpolation = Interpolation()
            self.read_dict({self.default_section: defaults})
        finally:
            self._interpolation = hold_interpolation
class SafeConfigParser(ConfigParser):
    """ConfigParser alias for backwards compatibility purposes."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Emit the deprecation at the caller's frame (stacklevel=2).
        message = (
            "The SafeConfigParser class has been renamed to ConfigParser "
            "in Python 3.2. This alias will be removed in Python 3.12."
            " Use ConfigParser directly instead."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
|
| 1240 |
+
|
| 1241 |
+
|
| 1242 |
+
class SectionProxy(MutableMapping):
    """A proxy for a single section from a parser."""

    def __init__(self, parser, name):
        """Creates a view on a section of the specified `name` in `parser`."""
        self._parser = parser
        self._name = name
        # Mirror every converter-specific getter (get<name>) defined on
        # the parser onto this proxy, pre-bound through self.get().
        for conv in parser.converters:
            attr = 'get' + conv
            setattr(self, attr,
                    functools.partial(self.get, _impl=getattr(parser, attr)))

    def __repr__(self):
        return '<Section: {}>'.format(self._name)

    def __getitem__(self, key):
        if self._parser.has_option(self._name, key):
            return self._parser.get(self._name, key)
        raise KeyError(key)

    def __setitem__(self, key, value):
        self._parser._validate_value_types(option=key, value=value)
        return self._parser.set(self._name, key, value)

    def __delitem__(self, key):
        # Both the existence check and the removal must succeed.
        removed = (self._parser.has_option(self._name, key) and
                   self._parser.remove_option(self._name, key))
        if not removed:
            raise KeyError(key)

    def __contains__(self, key):
        return self._parser.has_option(self._name, key)

    def __len__(self):
        return len(self._options())

    def __iter__(self):
        return iter(self._options())

    def _options(self):
        # The default section is read via the defaults mapping; any
        # other section via parser.options().
        if self._name == self._parser.default_section:
            return self._parser.defaults()
        return self._parser.options(self._name)

    @property
    def parser(self):
        # The parser object of the proxy is read-only.
        return self._parser

    @property
    def name(self):
        # The name of the section on a proxy is read-only.
        return self._name

    def get(self, option, fallback=None, *, raw=False, vars=None,
            _impl=None, **kwargs):
        """Get an option value.

        Unless `fallback` is provided, `None` will be returned if the option
        is not found.
        """
        # If `_impl` is provided, it should be a getter method on the parser
        # object that provides the desired type conversion.
        getter = _impl or self._parser.get
        return getter(self._name, option, raw=raw, vars=vars,
                      fallback=fallback, **kwargs)
|
| 1310 |
+
|
| 1311 |
+
|
| 1312 |
+
class ConverterMapping(MutableMapping):
    """Enables reuse of get*() methods between the parser and section proxies.

    If a parser class implements a getter directly, the value for the given
    key will be ``None``. The presence of the converter name here enables
    section proxies to find and use the implementation on the parser class.
    """

    GETTERCRE = re.compile(r"^get(?P<name>.+)$")

    def __init__(self, parser):
        self._parser = parser
        self._data = {}
        # Seed the mapping with every callable get<name>() already
        # present on the parser (stored as None -- see class docstring).
        for attr in dir(self._parser):
            match = self.GETTERCRE.match(attr)
            if match and callable(getattr(self._parser, attr)):
                self._data[match.group('name')] = None

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, value):
        try:
            method_name = 'get' + key
        except TypeError:
            raise ValueError('Incompatible key: {} (type: {})'
                             ''.format(key, type(key)))
        if method_name == 'get':
            raise ValueError('Incompatible key: cannot use "" as a name')
        self._data[key] = value
        # Build the converting getter once, then install it on the
        # parser and on every existing section proxy.
        func = functools.partial(self._parser._get_conv, conv=value)
        func.converter = value
        setattr(self._parser, method_name, func)
        for proxy in self._parser.values():
            setattr(proxy, method_name,
                    functools.partial(proxy.get, _impl=func))

    def __delitem__(self, key):
        try:
            # A falsy key degrades to None, making the concatenation
            # raise TypeError -> KeyError, matching __setitem__'s checks.
            method_name = 'get' + (key or None)
        except TypeError:
            raise KeyError(key)
        del self._data[key]
        for inst in itertools.chain((self._parser,), self._parser.values()):
            try:
                delattr(inst, method_name)
            except AttributeError:
                # don't raise since the entry was present in _data, silently
                # clean up
                continue

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)
|
evalkit_llava/lib/python3.10/copy.py
ADDED
|
@@ -0,0 +1,304 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Generic (shallow and deep) copying operations.
|
| 2 |
+
|
| 3 |
+
Interface summary:
|
| 4 |
+
|
| 5 |
+
import copy
|
| 6 |
+
|
| 7 |
+
x = copy.copy(y) # make a shallow copy of y
|
| 8 |
+
x = copy.deepcopy(y) # make a deep copy of y
|
| 9 |
+
|
| 10 |
+
For module specific errors, copy.Error is raised.
|
| 11 |
+
|
| 12 |
+
The difference between shallow and deep copying is only relevant for
|
| 13 |
+
compound objects (objects that contain other objects, like lists or
|
| 14 |
+
class instances).
|
| 15 |
+
|
| 16 |
+
- A shallow copy constructs a new compound object and then (to the
|
| 17 |
+
extent possible) inserts *the same objects* into it that the
|
| 18 |
+
original contains.
|
| 19 |
+
|
| 20 |
+
- A deep copy constructs a new compound object and then, recursively,
|
| 21 |
+
inserts *copies* into it of the objects found in the original.
|
| 22 |
+
|
| 23 |
+
Two problems often exist with deep copy operations that don't exist
|
| 24 |
+
with shallow copy operations:
|
| 25 |
+
|
| 26 |
+
a) recursive objects (compound objects that, directly or indirectly,
|
| 27 |
+
contain a reference to themselves) may cause a recursive loop
|
| 28 |
+
|
| 29 |
+
b) because deep copy copies *everything* it may copy too much, e.g.
|
| 30 |
+
administrative data structures that should be shared even between
|
| 31 |
+
copies
|
| 32 |
+
|
| 33 |
+
Python's deep copy operation avoids these problems by:
|
| 34 |
+
|
| 35 |
+
a) keeping a table of objects already copied during the current
|
| 36 |
+
copying pass
|
| 37 |
+
|
| 38 |
+
b) letting user-defined classes override the copying operation or the
|
| 39 |
+
set of components copied
|
| 40 |
+
|
| 41 |
+
This version does not copy types like module, class, function, method,
|
| 42 |
+
nor stack trace, stack frame, nor file, socket, window, nor any
|
| 43 |
+
similar types.
|
| 44 |
+
|
| 45 |
+
Classes can use the same interfaces to control copying that they use
|
| 46 |
+
to control pickling: they can define methods called __getinitargs__(),
|
| 47 |
+
__getstate__() and __setstate__(). See the documentation for module
|
| 48 |
+
"pickle" for information on these methods.
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
import types
import weakref
from copyreg import dispatch_table

class Error(Exception):
    # Raised when an object cannot be shallow- or deep-copied.
    pass
error = Error   # backward compatibility

try:
    # Jython-only mapping type; absent on CPython, hence the fallback.
    from org.python.core import PyStringMap
except ImportError:
    PyStringMap = None

__all__ = ["Error", "copy", "deepcopy"]
|
| 65 |
+
|
| 66 |
+
def copy(x):
    """Shallow copy operation on arbitrary Python objects.

    See the module's __doc__ string for more info.
    """
    cls = type(x)

    # Fast path: types with a registered shallow-copy handler.
    copier = _copy_dispatch.get(cls)
    if copier:
        return copier(x)

    if issubclass(cls, type):
        # treat it as a regular class:
        return _copy_immutable(x)

    # A __copy__ hook defined on the class takes precedence next.
    copier = getattr(cls, "__copy__", None)
    if copier is not None:
        return copier(x)

    # Otherwise fall back to the pickle reduction protocol:
    # copyreg's dispatch_table, then __reduce_ex__(4), then __reduce__.
    reductor = dispatch_table.get(cls)
    if reductor is not None:
        rv = reductor(x)
    elif (reductor := getattr(x, "__reduce_ex__", None)) is not None:
        rv = reductor(4)
    elif reductor := getattr(x, "__reduce__", None):
        rv = reductor()
    else:
        raise Error("un(shallow)copyable object of type %s" % cls)

    if isinstance(rv, str):
        # A string reduction means the object is its own copy.
        return x
    return _reconstruct(x, None, *rv)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
# Dispatch table mapping a type to its shallow-copy handler.
_copy_dispatch = d = {}

def _copy_immutable(x):
    # Immutable objects are their own shallow copy.
    return x
for t in (type(None), int, float, bool, complex, str, tuple,
          bytes, frozenset, type, range, slice, property,
          types.BuiltinFunctionType, type(Ellipsis), type(NotImplemented),
          types.FunctionType, weakref.ref):
    d[t] = _copy_immutable
# CodeType is looked up defensively in case the attribute is missing.
t = getattr(types, "CodeType", None)
if t is not None:
    d[t] = _copy_immutable

# Built-in containers shallow-copy via their own .copy() method.
d[list] = list.copy
d[dict] = dict.copy
d[set] = set.copy
d[bytearray] = bytearray.copy

if PyStringMap is not None:
    d[PyStringMap] = PyStringMap.copy

# Don't leak the loop/alias names at module level.
del d, t
|
| 127 |
+
|
| 128 |
+
def deepcopy(x, memo=None, _nil=[]):
    """Deep copy operation on arbitrary Python objects.

    See the module's __doc__ string for more info.
    """
    if memo is None:
        memo = {}

    # Objects are memoized by id; _nil distinguishes "absent" from a
    # legitimately-memoized falsy value.
    key = id(x)
    cached = memo.get(key, _nil)
    if cached is not _nil:
        return cached

    cls = type(x)

    copier = _deepcopy_dispatch.get(cls)
    if copier is not None:
        y = copier(x, memo)
    elif issubclass(cls, type):
        # Classes themselves are treated as atomic.
        y = _deepcopy_atomic(x, memo)
    elif (copier := getattr(x, "__deepcopy__", None)) is not None:
        y = copier(memo)
    else:
        # Fall back to the pickle reduction protocol: copyreg's
        # dispatch_table, then __reduce_ex__(4), then __reduce__.
        reductor = dispatch_table.get(cls)
        if reductor:
            rv = reductor(x)
        elif (reductor := getattr(x, "__reduce_ex__", None)) is not None:
            rv = reductor(4)
        elif reductor := getattr(x, "__reduce__", None):
            rv = reductor()
        else:
            raise Error(
                "un(deep)copyable object of type %s" % cls)
        if isinstance(rv, str):
            # A string reduction means the object is its own copy.
            y = x
        else:
            y = _reconstruct(x, memo, *rv)

    # If is its own copy, don't memoize.
    if y is not x:
        memo[key] = y
        _keep_alive(x, memo) # Make sure x lives at least as long as its entry
    return y
|
| 179 |
+
|
| 180 |
+
# Dispatch table mapping a type to its deep-copy handler.
_deepcopy_dispatch = d = {}

def _deepcopy_atomic(x, memo):
    # Atomic/immutable objects are returned unchanged.
    return x
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[type(NotImplemented)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
d[complex] = _deepcopy_atomic
d[bytes] = _deepcopy_atomic
d[str] = _deepcopy_atomic
d[types.CodeType] = _deepcopy_atomic
d[type] = _deepcopy_atomic
d[range] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
d[weakref.ref] = _deepcopy_atomic
d[property] = _deepcopy_atomic
|
| 200 |
+
|
| 201 |
+
def _deepcopy_list(x, memo, deepcopy=deepcopy):
    # Register the (still empty) result in the memo *before* recursing
    # so that self-referential lists terminate.
    result = []
    memo[id(x)] = result
    for item in x:
        result.append(deepcopy(item, memo))
    return result
d[list] = _deepcopy_list
|
| 209 |
+
|
| 210 |
+
def _deepcopy_tuple(x, memo, deepcopy=deepcopy):
    copied = [deepcopy(a, memo) for a in x]
    # We're not going to put the tuple in the memo, but it's still important we
    # check for it, in case the tuple contains recursive mutable structures.
    try:
        return memo[id(x)]
    except KeyError:
        pass
    if all(old is new for old, new in zip(x, copied)):
        # Every element deep-copied to itself: the tuple is its own copy.
        return x
    return tuple(copied)
d[tuple] = _deepcopy_tuple
|
| 226 |
+
|
| 227 |
+
def _deepcopy_dict(x, memo, deepcopy=deepcopy):
    # Seed the memo with the (still empty) result first so that
    # self-referential dicts terminate.
    result = {}
    memo[id(x)] = result
    for key, value in x.items():
        result[deepcopy(key, memo)] = deepcopy(value, memo)
    return result
d[dict] = _deepcopy_dict
if PyStringMap is not None:
    d[PyStringMap] = _deepcopy_dict
|
| 236 |
+
|
| 237 |
+
def _deepcopy_method(x, memo): # Copy instance methods
    # Rebind the (shared) underlying function to a deep copy of the
    # bound instance.
    return type(x)(x.__func__, deepcopy(x.__self__, memo))
d[types.MethodType] = _deepcopy_method

# Dispatch table is fully populated; drop the short alias.
del d
|
| 242 |
+
|
| 243 |
+
def _keep_alive(x, memo):
|
| 244 |
+
"""Keeps a reference to the object x in the memo.
|
| 245 |
+
|
| 246 |
+
Because we remember objects by their id, we have
|
| 247 |
+
to assure that possibly temporary objects are kept
|
| 248 |
+
alive by referencing them.
|
| 249 |
+
We store a reference at the id of the memo, which should
|
| 250 |
+
normally not be used unless someone tries to deepcopy
|
| 251 |
+
the memo itself...
|
| 252 |
+
"""
|
| 253 |
+
try:
|
| 254 |
+
memo[id(memo)].append(x)
|
| 255 |
+
except KeyError:
|
| 256 |
+
# aha, this is the first one :-)
|
| 257 |
+
memo[id(memo)]=[x]
|
| 258 |
+
|
| 259 |
+
def _reconstruct(x, memo, func, args,
                 state=None, listiter=None, dictiter=None,
                 *, deepcopy=deepcopy):
    # Rebuild an object from a __reduce__-style tuple (func, args,
    # state, listiter, dictiter).  `memo` is None for shallow copies
    # and the memo dict when deep-copying.
    deep = memo is not None
    if deep and args:
        # Deep-copy the constructor arguments lazily, one at a time.
        args = (deepcopy(arg, memo) for arg in args)
    y = func(*args)
    if deep:
        # Memoize before restoring state so cycles through y terminate.
        memo[id(x)] = y

    if state is not None:
        if deep:
            state = deepcopy(state, memo)
        if hasattr(y, '__setstate__'):
            y.__setstate__(state)
        else:
            # Without __setstate__, a 2-tuple state is interpreted as
            # (instance __dict__ state, slot-attribute state).
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            else:
                slotstate = None
            if state is not None:
                y.__dict__.update(state)
            if slotstate is not None:
                for key, value in slotstate.items():
                    setattr(y, key, value)

    # Replay appended list items and dict entries from the reduction.
    if listiter is not None:
        if deep:
            for item in listiter:
                item = deepcopy(item, memo)
                y.append(item)
        else:
            for item in listiter:
                y.append(item)
    if dictiter is not None:
        if deep:
            for key, value in dictiter:
                key = deepcopy(key, memo)
                value = deepcopy(value, memo)
                y[key] = value
        else:
            for key, value in dictiter:
                y[key] = value
    return y
|
| 303 |
+
|
| 304 |
+
del types, weakref, PyStringMap
|
evalkit_llava/lib/python3.10/difflib.py
ADDED
|
@@ -0,0 +1,2056 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Module difflib -- helpers for computing deltas between objects.
|
| 3 |
+
|
| 4 |
+
Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
|
| 5 |
+
Use SequenceMatcher to return list of the best "good enough" matches.
|
| 6 |
+
|
| 7 |
+
Function context_diff(a, b):
|
| 8 |
+
For two lists of strings, return a delta in context diff format.
|
| 9 |
+
|
| 10 |
+
Function ndiff(a, b):
|
| 11 |
+
Return a delta: the difference between `a` and `b` (lists of strings).
|
| 12 |
+
|
| 13 |
+
Function restore(delta, which):
|
| 14 |
+
Return one of the two sequences that generated an ndiff delta.
|
| 15 |
+
|
| 16 |
+
Function unified_diff(a, b):
|
| 17 |
+
For two lists of strings, return a delta in unified diff format.
|
| 18 |
+
|
| 19 |
+
Class SequenceMatcher:
|
| 20 |
+
A flexible class for comparing pairs of sequences of any type.
|
| 21 |
+
|
| 22 |
+
Class Differ:
|
| 23 |
+
For producing human-readable deltas from sequences of lines of text.
|
| 24 |
+
|
| 25 |
+
Class HtmlDiff:
|
| 26 |
+
For producing HTML side by side comparison with change highlights.
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
           'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
           'unified_diff', 'diff_bytes', 'HtmlDiff', 'Match']

from heapq import nlargest as _nlargest
from collections import namedtuple as _namedtuple
from types import GenericAlias

# Triple describing a matching block (fields: a, b, size).
Match = _namedtuple('Match', 'a b size')
|
| 38 |
+
|
| 39 |
+
def _calculate_ratio(matches, length):
|
| 40 |
+
if length:
|
| 41 |
+
return 2.0 * matches / length
|
| 42 |
+
return 1.0
|
| 43 |
+
|
| 44 |
+
class SequenceMatcher:

    """
    SequenceMatcher is a flexible class for comparing pairs of sequences of
    any type, so long as the sequence elements are hashable.  The basic
    algorithm predates, and is a little fancier than, an algorithm
    published in the late 1980's by Ratcliff and Obershelp under the
    hyperbolic name "gestalt pattern matching".  The basic idea is to find
    the longest contiguous matching subsequence that contains no "junk"
    elements (R-O doesn't address junk).  The same idea is then applied
    recursively to the pieces of the sequences to the left and to the right
    of the matching subsequence.  This does not yield minimal edit
    sequences, but does tend to yield matches that "look right" to people.

    SequenceMatcher tries to compute a "human-friendly diff" between two
    sequences.  Unlike e.g. UNIX(tm) diff, the fundamental notion is the
    longest *contiguous* & junk-free matching subsequence.  That's what
    catches peoples' eyes.  The Windows(tm) windiff has another interesting
    notion, pairing up elements that appear uniquely in each sequence.
    That, and the method here, appear to yield more intuitive difference
    reports than does diff.  This method appears to be the least vulnerable
    to syncing up on blocks of "junk lines", though (like blank lines in
    ordinary text files, or maybe "<P>" lines in HTML files).  That may be
    because this is the only method of the 3 that has a *concept* of
    "junk" <wink>.

    Example, comparing two strings, and considering blanks to be "junk":

    >>> s = SequenceMatcher(lambda x: x == " ",
    ...                     "private Thread currentThread;",
    ...                     "private volatile Thread currentThread;")
    >>>

    .ratio() returns a float in [0, 1], measuring the "similarity" of the
    sequences.  As a rule of thumb, a .ratio() value over 0.6 means the
    sequences are close matches:

    >>> print(round(s.ratio(), 3))
    0.866
    >>>

    If you're only interested in where the sequences match,
    .get_matching_blocks() is handy:

    >>> for block in s.get_matching_blocks():
    ...     print("a[%d] and b[%d] match for %d elements" % block)
    a[0] and b[0] match for 8 elements
    a[8] and b[17] match for 21 elements
    a[29] and b[38] match for 0 elements

    Note that the last tuple returned by .get_matching_blocks() is always a
    dummy, (len(a), len(b), 0), and this is the only case in which the last
    tuple element (number of elements matched) is 0.

    If you want to know how to change the first sequence into the second,
    use .get_opcodes():

    >>> for opcode in s.get_opcodes():
    ...     print("%6s a[%d:%d] b[%d:%d]" % opcode)
     equal a[0:8] b[0:8]
    insert a[8:8] b[8:17]
     equal a[8:29] b[17:38]

    See the Differ class for a fancy human-friendly file differencer, which
    uses SequenceMatcher both to compare sequences of lines, and to compare
    sequences of characters within similar (near-matching) lines.

    See also function get_close_matches() in this module, which shows how
    simple code building on SequenceMatcher can be used to do useful work.

    Timing:  Basic R-O is cubic time worst case and quadratic time expected
    case.  SequenceMatcher is quadratic time for the worst case and has
    expected-case behavior dependent in a complicated way on how many
    elements the sequences have in common; best case time is linear.
    """

    def __init__(self, isjunk=None, a='', b='', autojunk=True):
        """Construct a SequenceMatcher.

        Optional arg isjunk is None (the default), or a one-argument
        function that takes a sequence element and returns true iff the
        element is junk.  None is equivalent to passing "lambda x: 0", i.e.
        no elements are considered to be junk.  For example, pass
            lambda x: x in " \\t"
        if you're comparing lines as sequences of characters, and don't
        want to synch up on blanks or hard tabs.

        Optional arg a is the first of two sequences to be compared.  By
        default, an empty string.  The elements of a must be hashable.  See
        also .set_seqs() and .set_seq1().

        Optional arg b is the second of two sequences to be compared.  By
        default, an empty string.  The elements of b must be hashable. See
        also .set_seqs() and .set_seq2().

        Optional arg autojunk should be set to False to disable the
        "automatic junk heuristic" that treats popular elements as junk
        (see module documentation for more information).
        """

        # Members:
        # a
        #      first sequence
        # b
        #      second sequence; differences are computed as "what do
        #      we need to do to 'a' to change it into 'b'?"
        # b2j
        #      for x in b, b2j[x] is a list of the indices (into b)
        #      at which x appears; junk and popular elements do not appear
        # fullbcount
        #      for x in b, fullbcount[x] == the number of times x
        #      appears in b; only materialized if really needed (used
        #      only for computing quick_ratio())
        # matching_blocks
        #      a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
        #      ascending & non-overlapping in i and in j; terminated by
        #      a dummy (len(a), len(b), 0) sentinel
        # opcodes
        #      a list of (tag, i1, i2, j1, j2) tuples, where tag is
        #      one of
        #          'replace'   a[i1:i2] should be replaced by b[j1:j2]
        #          'delete'    a[i1:i2] should be deleted
        #          'insert'    b[j1:j2] should be inserted
        #          'equal'     a[i1:i2] == b[j1:j2]
        # isjunk
        #      a user-supplied function taking a sequence element and
        #      returning true iff the element is "junk" -- this has
        #      subtle but helpful effects on the algorithm, which I'll
        #      get around to writing up someday <0.9 wink>.
        #      DON'T USE!  Only __chain_b uses this.  Use "in self.bjunk".
        # bjunk
        #      the items in b for which isjunk is True.
        # bpopular
        #      nonjunk items in b treated as junk by the heuristic (if used).

        self.isjunk = isjunk
        self.a = self.b = None
        self.autojunk = autojunk
        self.set_seqs(a, b)

    def set_seqs(self, a, b):
        """Set the two sequences to be compared.

        >>> s = SequenceMatcher()
        >>> s.set_seqs("abcd", "bcde")
        >>> s.ratio()
        0.75
        """

        self.set_seq1(a)
        self.set_seq2(b)

    def set_seq1(self, a):
        """Set the first sequence to be compared.

        The second sequence to be compared is not changed.

        >>> s = SequenceMatcher(None, "abcd", "bcde")
        >>> s.ratio()
        0.75
        >>> s.set_seq1("bcde")
        >>> s.ratio()
        1.0
        >>>

        SequenceMatcher computes and caches detailed information about the
        second sequence, so if you want to compare one sequence S against
        many sequences, use .set_seq2(S) once and call .set_seq1(x)
        repeatedly for each of the other sequences.

        See also set_seqs() and set_seq2().
        """

        if a is self.a:
            return
        self.a = a
        # Invalidate caches derived from the old pair of sequences.
        self.matching_blocks = self.opcodes = None

    def set_seq2(self, b):
        """Set the second sequence to be compared.

        The first sequence to be compared is not changed.

        >>> s = SequenceMatcher(None, "abcd", "bcde")
        >>> s.ratio()
        0.75
        >>> s.set_seq2("abcd")
        >>> s.ratio()
        1.0
        >>>

        SequenceMatcher computes and caches detailed information about the
        second sequence, so if you want to compare one sequence S against
        many sequences, use .set_seq2(S) once and call .set_seq1(x)
        repeatedly for each of the other sequences.

        See also set_seqs() and set_seq1().
        """

        if b is self.b:
            return
        self.b = b
        self.matching_blocks = self.opcodes = None
        self.fullbcount = None
        self.__chain_b()

    # For each element x in b, set b2j[x] to a list of the indices in
    # b where x appears; the indices are in increasing order; note that
    # the number of times x appears in b is len(b2j[x]) ...
    # when self.isjunk is defined, junk elements don't show up in this
    # map at all, which stops the central find_longest_match method
    # from starting any matching block at a junk element ...
    # b2j also does not contain entries for "popular" elements, meaning
    # elements that account for more than 1 + 1% of the total elements, and
    # when the sequence is reasonably large (>= 200 elements); this can
    # be viewed as an adaptive notion of semi-junk, and yields an enormous
    # speedup when, e.g., comparing program files with hundreds of
    # instances of "return NULL;" ...
    # note that this is only called when b changes; so for cross-product
    # kinds of matches, it's best to call set_seq2 once, then set_seq1
    # repeatedly

    def __chain_b(self):
        # Because isjunk is a user-defined (not C) function, and we test
        # for junk a LOT, it's important to minimize the number of calls.
        # Before the tricks described here, __chain_b was by far the most
        # time-consuming routine in the whole module!  If anyone sees
        # Jim Roskind, thank him again for profile.py -- I never would
        # have guessed that.
        # The first trick is to build b2j ignoring the possibility
        # of junk.  I.e., we don't call isjunk at all yet.  Throwing
        # out the junk later is much cheaper than building b2j "right"
        # from the start.
        b = self.b
        self.b2j = b2j = {}

        for i, elt in enumerate(b):
            indices = b2j.setdefault(elt, [])
            indices.append(i)

        # Purge junk elements
        self.bjunk = junk = set()
        isjunk = self.isjunk
        if isjunk:
            for elt in b2j.keys():
                if isjunk(elt):
                    junk.add(elt)
            for elt in junk: # separate loop avoids separate list of keys
                del b2j[elt]

        # Purge popular elements that are not junk
        self.bpopular = popular = set()
        n = len(b)
        if self.autojunk and n >= 200:
            ntest = n // 100 + 1
            for elt, idxs in b2j.items():
                if len(idxs) > ntest:
                    popular.add(elt)
            for elt in popular: # ditto; as fast for 1% deletion
                del b2j[elt]

    def find_longest_match(self, alo=0, ahi=None, blo=0, bhi=None):
        """Find longest matching block in a[alo:ahi] and b[blo:bhi].

        By default it will find the longest match in the entirety of a and b.

        If isjunk is not defined:

        Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
            alo <= i <= i+k <= ahi
            blo <= j <= j+k <= bhi
        and for all (i',j',k') meeting those conditions,
            k >= k'
            i <= i'
            and if i == i', j <= j'

        In other words, of all maximal matching blocks, return one that
        starts earliest in a, and of all those maximal matching blocks that
        start earliest in a, return the one that starts earliest in b.

        >>> s = SequenceMatcher(None, " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        Match(a=0, b=4, size=5)

        If isjunk is defined, first the longest matching block is
        determined as above, but with the additional restriction that no
        junk element appears in the block.  Then that block is extended as
        far as possible by matching (only) junk elements on both sides.  So
        the resulting block never matches on junk except as identical junk
        happens to be adjacent to an "interesting" match.

        Here's the same example as before, but considering blanks to be
        junk. That prevents " abcd" from matching the " abcd" at the tail
        end of the second sequence directly.  Instead only the "abcd" can
        match, and matches the leftmost "abcd" in the second sequence:

        >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
        >>> s.find_longest_match(0, 5, 0, 9)
        Match(a=1, b=0, size=4)

        If no blocks match, return (alo, blo, 0).

        >>> s = SequenceMatcher(None, "ab", "c")
        >>> s.find_longest_match(0, 2, 0, 1)
        Match(a=0, b=0, size=0)
        """

        # CAUTION:  stripping common prefix or suffix would be incorrect.
        # E.g.,
        #    ab
        #    acab
        # Longest matching block is "ab", but if common prefix is
        # stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
        # strip, so ends up claiming that ab is changed to acab by
        # inserting "ca" in the middle.  That's minimal but unintuitive:
        # "it's obvious" that someone inserted "ac" at the front.
        # Windiff ends up at the same place as diff, but by pairing up
        # the unique 'b's and then matching the first two 'a's.

        a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.bjunk.__contains__
        if ahi is None:
            ahi = len(a)
        if bhi is None:
            bhi = len(b)
        besti, bestj, bestsize = alo, blo, 0
        # find longest junk-free match
        # during an iteration of the loop, j2len[j] = length of longest
        # junk-free match ending with a[i-1] and b[j]
        j2len = {}
        nothing = []
        for i in range(alo, ahi):
            # look at all instances of a[i] in b; note that because
            # b2j has no junk keys, the loop is skipped if a[i] is junk
            j2lenget = j2len.get
            newj2len = {}
            for j in b2j.get(a[i], nothing):
                # a[i] matches b[j]
                if j < blo:
                    continue
                if j >= bhi:
                    break
                k = newj2len[j] = j2lenget(j-1, 0) + 1
                if k > bestsize:
                    besti, bestj, bestsize = i-k+1, j-k+1, k
            j2len = newj2len

        # Extend the best by non-junk elements on each end.  In particular,
        # "popular" non-junk elements aren't in b2j, which greatly speeds
        # the inner loop above, but also means "the best" match so far
        # doesn't contain any junk *or* popular non-junk elements.
        while besti > alo and bestj > blo and \
              not isbjunk(b[bestj-1]) and \
              a[besti-1] == b[bestj-1]:
            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
        while besti+bestsize < ahi and bestj+bestsize < bhi and \
              not isbjunk(b[bestj+bestsize]) and \
              a[besti+bestsize] == b[bestj+bestsize]:
            bestsize += 1

        # Now that we have a wholly interesting match (albeit possibly
        # empty!), we may as well suck up the matching junk on each
        # side of it too.  Can't think of a good reason not to, and it
        # saves post-processing the (possibly considerable) expense of
        # figuring out what to do with it.  In the case of an empty
        # interesting match, this is clearly the right thing to do,
        # because no other kind of match is possible in the regions.
        while besti > alo and bestj > blo and \
              isbjunk(b[bestj-1]) and \
              a[besti-1] == b[bestj-1]:
            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
        while besti+bestsize < ahi and bestj+bestsize < bhi and \
              isbjunk(b[bestj+bestsize]) and \
              a[besti+bestsize] == b[bestj+bestsize]:
            bestsize = bestsize + 1

        return Match(besti, bestj, bestsize)

    def get_matching_blocks(self):
        """Return list of triples describing matching subsequences.

        Each triple is of the form (i, j, n), and means that
        a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
        i and in j.  New in Python 2.5, it's also guaranteed that if
        (i, j, n) and (i', j', n') are adjacent triples in the list, and
        the second is not the last triple in the list, then i+n != i' or
        j+n != j'.  IOW, adjacent triples never describe adjacent equal
        blocks.

        The last triple is a dummy, (len(a), len(b), 0), and is the only
        triple with n==0.

        >>> s = SequenceMatcher(None, "abxcd", "abcd")
        >>> list(s.get_matching_blocks())
        [Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
        """

        if self.matching_blocks is not None:
            return self.matching_blocks
        la, lb = len(self.a), len(self.b)

        # This is most naturally expressed as a recursive algorithm, but
        # at least one user bumped into extreme use cases that exceeded
        # the recursion limit on their box.  So, now we maintain a list
        # ('queue`) of blocks we still need to look at, and append partial
        # results to `matching_blocks` in a loop; the matches are sorted
        # at the end.
        queue = [(0, la, 0, lb)]
        matching_blocks = []
        while queue:
            alo, ahi, blo, bhi = queue.pop()
            i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
            # a[alo:i] vs b[blo:j] unknown
            # a[i:i+k] same as b[j:j+k]
            # a[i+k:ahi] vs b[j+k:bhi] unknown
            if k:   # if k is 0, there was no matching block
                matching_blocks.append(x)
                if alo < i and blo < j:
                    queue.append((alo, i, blo, j))
                if i+k < ahi and j+k < bhi:
                    queue.append((i+k, ahi, j+k, bhi))
        matching_blocks.sort()

        # It's possible that we have adjacent equal blocks in the
        # matching_blocks list now.  Starting with 2.5, this code was added
        # to collapse them.
        i1 = j1 = k1 = 0
        non_adjacent = []
        for i2, j2, k2 in matching_blocks:
            # Is this block adjacent to i1, j1, k1?
            if i1 + k1 == i2 and j1 + k1 == j2:
                # Yes, so collapse them -- this just increases the length of
                # the first block by the length of the second, and the first
                # block so lengthened remains the block to compare against.
                k1 += k2
            else:
                # Not adjacent.  Remember the first block (k1==0 means it's
                # the dummy we started with), and make the second block the
                # new block to compare against.
                if k1:
                    non_adjacent.append((i1, j1, k1))
                i1, j1, k1 = i2, j2, k2
        if k1:
            non_adjacent.append((i1, j1, k1))

        non_adjacent.append( (la, lb, 0) )
        self.matching_blocks = list(map(Match._make, non_adjacent))
        return self.matching_blocks

    def get_opcodes(self):
        """Return list of 5-tuples describing how to turn a into b.

        Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple
        has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
        tuple preceding it, and likewise for j1 == the previous j2.

        The tags are strings, with these meanings:

        'replace':  a[i1:i2] should be replaced by b[j1:j2]
        'delete':   a[i1:i2] should be deleted.
                    Note that j1==j2 in this case.
        'insert':   b[j1:j2] should be inserted at a[i1:i1].
                    Note that i1==i2 in this case.
        'equal':    a[i1:i2] == b[j1:j2]

        >>> a = "qabxcd"
        >>> b = "abycdf"
        >>> s = SequenceMatcher(None, a, b)
        >>> for tag, i1, i2, j1, j2 in s.get_opcodes():
        ...    print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
        ...           (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2])))
         delete a[0:1] (q) b[0:0] ()
          equal a[1:3] (ab) b[0:2] (ab)
        replace a[3:4] (x) b[2:3] (y)
          equal a[4:6] (cd) b[3:5] (cd)
         insert a[6:6] () b[5:6] (f)
        """

        if self.opcodes is not None:
            return self.opcodes
        i = j = 0
        self.opcodes = answer = []
        for ai, bj, size in self.get_matching_blocks():
            # invariant:  we've pumped out correct diffs to change
            # a[:i] into b[:j], and the next matching block is
            # a[ai:ai+size] == b[bj:bj+size].  So we need to pump
            # out a diff to change a[i:ai] into b[j:bj], pump out
            # the matching block, and move (i,j) beyond the match
            tag = ''
            if i < ai and j < bj:
                tag = 'replace'
            elif i < ai:
                tag = 'delete'
            elif j < bj:
                tag = 'insert'
            if tag:
                answer.append( (tag, i, ai, j, bj) )
            i, j = ai+size, bj+size
            # the list of matching blocks is terminated by a
            # sentinel with size 0
            if size:
                answer.append( ('equal', ai, i, bj, j) )
        return answer

    def get_grouped_opcodes(self, n=3):
        """ Isolate change clusters by eliminating ranges with no changes.

        Return a generator of groups with up to n lines of context.
        Each group is in the same format as returned by get_opcodes().

        >>> from pprint import pprint
        >>> a = list(map(str, range(1,40)))
        >>> b = a[:]
        >>> b[8:8] = ['i']     # Make an insertion
        >>> b[20] += 'x'       # Make a replacement
        >>> b[23:28] = []      # Make a deletion
        >>> b[30] += 'y'       # Make another replacement
        >>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
        [[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
         [('equal', 16, 19, 17, 20),
          ('replace', 19, 20, 20, 21),
          ('equal', 20, 22, 21, 23),
          ('delete', 22, 27, 23, 23),
          ('equal', 27, 30, 23, 26)],
         [('equal', 31, 34, 27, 30),
          ('replace', 34, 35, 30, 31),
          ('equal', 35, 38, 31, 34)]]
        """

        codes = self.get_opcodes()
        if not codes:
            codes = [("equal", 0, 1, 0, 1)]
        # Fixup leading and trailing groups if they show no changes.
        if codes[0][0] == 'equal':
            tag, i1, i2, j1, j2 = codes[0]
            codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
        if codes[-1][0] == 'equal':
            tag, i1, i2, j1, j2 = codes[-1]
            codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)

        nn = n + n
        group = []
        for tag, i1, i2, j1, j2 in codes:
            # End the current group and start a new one whenever
            # there is a large range with no changes.
            if tag == 'equal' and i2-i1 > nn:
                group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
                yield group
                group = []
                i1, j1 = max(i1, i2-n), max(j1, j2-n)
            group.append((tag, i1, i2, j1 ,j2))
        if group and not (len(group)==1 and group[0][0] == 'equal'):
            yield group

    def ratio(self):
        """Return a measure of the sequences' similarity (float in [0,1]).

        Where T is the total number of elements in both sequences, and
        M is the number of matches, this is 2.0*M / T.
        Note that this is 1 if the sequences are identical, and 0 if
        they have nothing in common.

        .ratio() is expensive to compute if you haven't already computed
        .get_matching_blocks() or .get_opcodes(), in which case you may
        want to try .quick_ratio() or .real_quick_ratio() first to get an
        upper bound.

        >>> s = SequenceMatcher(None, "abcd", "bcde")
        >>> s.ratio()
        0.75
        >>> s.quick_ratio()
        0.75
        >>> s.real_quick_ratio()
        1.0
        """

        matches = sum(triple[-1] for triple in self.get_matching_blocks())
        return _calculate_ratio(matches, len(self.a) + len(self.b))

    def quick_ratio(self):
        """Return an upper bound on ratio() relatively quickly.

        This isn't defined beyond that it is an upper bound on .ratio(), and
        is faster to compute.
        """

        # viewing a and b as multisets, set matches to the cardinality
        # of their intersection; this counts the number of matches
        # without regard to order, so is clearly an upper bound
        if self.fullbcount is None:
            self.fullbcount = fullbcount = {}
            for elt in self.b:
                fullbcount[elt] = fullbcount.get(elt, 0) + 1
        fullbcount = self.fullbcount
        # avail[x] is the number of times x appears in 'b' less the
        # number of times we've seen it in 'a' so far ... kinda
        avail = {}
        availhas, matches = avail.__contains__, 0
        for elt in self.a:
            if availhas(elt):
                numb = avail[elt]
            else:
                numb = fullbcount.get(elt, 0)
            avail[elt] = numb - 1
            if numb > 0:
                matches = matches + 1
        return _calculate_ratio(matches, len(self.a) + len(self.b))

    def real_quick_ratio(self):
        """Return an upper bound on ratio() very quickly.

        This isn't defined beyond that it is an upper bound on .ratio(), and
        is faster to compute than either .ratio() or .quick_ratio().
        """

        la, lb = len(self.a), len(self.b)
        # can't have more matches than the number of elements in the
        # shorter sequence
        return _calculate_ratio(min(la, lb), la + lb)

    # Support generic-alias parameterization, e.g. SequenceMatcher[str]
    # in annotations (uses types.GenericAlias, imported at module top).
    __class_getitem__ = classmethod(GenericAlias)
| 664 |
+
|
| 665 |
+
|
| 666 |
+
def get_close_matches(word, possibilities, n=3, cutoff=0.6):
    """Use SequenceMatcher to return list of the best "good enough" matches.

    word is a sequence for which close matches are desired (typically a
    string).

    possibilities is a list of sequences against which to match word
    (typically a list of strings).

    Optional arg n (default 3) is the maximum number of close matches to
    return.  n must be > 0.

    Optional arg cutoff (default 0.6) is a float in [0, 1].  Possibilities
    that don't score at least that similar to word are ignored.

    The best (no more than n) matches among the possibilities are returned
    in a list, sorted by similarity score, most similar first.

    >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
    ['apple', 'ape']
    >>> import keyword as _keyword
    >>> get_close_matches("wheel", _keyword.kwlist)
    ['while']
    >>> get_close_matches("Apple", _keyword.kwlist)
    []
    >>> get_close_matches("accept", _keyword.kwlist)
    ['except']
    """

    if not n > 0:
        raise ValueError("n must be > 0: %r" % (n,))
    if not 0.0 <= cutoff <= 1.0:
        raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))

    # Cache the analysis of `word` once (seq2) and swap each candidate in
    # as seq1 -- SequenceMatcher caches detail about its second sequence.
    matcher = SequenceMatcher()
    matcher.set_seq2(word)
    scored = []
    for candidate in possibilities:
        matcher.set_seq1(candidate)
        # Cheap upper bounds first; compute the real ratio only when both
        # quick estimates already clear the cutoff.
        if (matcher.real_quick_ratio() >= cutoff
                and matcher.quick_ratio() >= cutoff
                and matcher.ratio() >= cutoff):
            scored.append((matcher.ratio(), candidate))

    # Keep the n best scorers, most similar first, and drop the scores.
    return [candidate for score, candidate in _nlargest(n, scored)]
|
| 713 |
+
|
| 714 |
+
|
| 715 |
+
def _keep_original_ws(s, tag_s):
|
| 716 |
+
"""Replace whitespace with the original whitespace characters in `s`"""
|
| 717 |
+
return ''.join(
|
| 718 |
+
c if tag_c == " " and c.isspace() else tag_c
|
| 719 |
+
for c, tag_c in zip(s, tag_s)
|
| 720 |
+
)
|
| 721 |
+
|
| 722 |
+
|
| 723 |
+
|
| 724 |
+
class Differ:
    r"""
    Differ is a class for comparing sequences of lines of text, and
    producing human-readable differences or deltas.  Differ uses
    SequenceMatcher both to compare sequences of lines, and to compare
    sequences of characters within similar (near-matching) lines.

    Each line of a Differ delta begins with a two-letter code:

        '- '    line unique to sequence 1
        '+ '    line unique to sequence 2
        '  '    line common to both sequences
        '? '    line not present in either input sequence

    Lines beginning with '? ' attempt to guide the eye to intraline
    differences, and were not present in either input sequence.  These lines
    can be confusing if the sequences contain tab characters.

    Note that Differ makes no claim to produce a *minimal* diff.  To the
    contrary, minimal diffs are often counter-intuitive, because they synch
    up anywhere possible, sometimes accidental matches 100 pages apart.
    Restricting synch points to contiguous matches preserves some notion of
    locality, at the occasional cost of producing a longer diff.

    Example: Comparing two texts.

    First we set up the texts, sequences of individual single-line strings
    ending with newlines (such sequences can also be obtained from the
    `readlines()` method of file-like objects):

    >>> text1 = '''  1. Beautiful is better than ugly.
    ...   2. Explicit is better than implicit.
    ...   3. Simple is better than complex.
    ...   4. Complex is better than complicated.
    ... '''.splitlines(keepends=True)
    >>> len(text1)
    4
    >>> text1[0][-1]
    '\n'
    >>> text2 = '''  1. Beautiful is better than ugly.
    ...   3.   Simple is better than complex.
    ...   4. Complicated is better than complex.
    ...   5. Flat is better than nested.
    ... '''.splitlines(keepends=True)

    Next we instantiate a Differ object:

    >>> d = Differ()

    Note that when instantiating a Differ object we may pass functions to
    filter out line and character 'junk'.  See Differ.__init__ for details.

    Finally, we compare the two:

    >>> result = list(d.compare(text1, text2))

    'result' is a list of strings, so let's pretty-print it:

    >>> from pprint import pprint as _pprint
    >>> _pprint(result)
    ['    1. Beautiful is better than ugly.\n',
     '-   2. Explicit is better than implicit.\n',
     '-   3. Simple is better than complex.\n',
     '+   3.   Simple is better than complex.\n',
     '?     ++\n',
     '-   4. Complex is better than complicated.\n',
     '?            ^                     ---- ^\n',
     '+   4. Complicated is better than complex.\n',
     '?           ++++ ^                      ^\n',
     '+   5. Flat is better than nested.\n']

    As a single multi-line string it looks like this:

    >>> print(''.join(result), end="")
        1. Beautiful is better than ugly.
    -   2. Explicit is better than implicit.
    -   3. Simple is better than complex.
    +   3.   Simple is better than complex.
    ?     ++
    -   4. Complex is better than complicated.
    ?            ^                     ---- ^
    +   4. Complicated is better than complex.
    ?           ++++ ^                      ^
    +   5. Flat is better than nested.
    """

    def __init__(self, linejunk=None, charjunk=None):
        """
        Construct a text differencer, with optional filters.

        The two optional keyword parameters are for filter functions:

        - `linejunk`: A function that should accept a single string argument,
          and return true iff the string is junk. The module-level function
          `IS_LINE_JUNK` may be used to filter out lines without visible
          characters, except for at most one splat ('#').  It is recommended
          to leave linejunk None; the underlying SequenceMatcher class has
          an adaptive notion of "noise" lines that's better than any static
          definition the author has ever been able to craft.

        - `charjunk`: A function that should accept a string of length 1. The
          module-level function `IS_CHARACTER_JUNK` may be used to filter out
          whitespace characters (a blank or tab; **note**: bad idea to include
          newline in this!).  Use of IS_CHARACTER_JUNK is recommended.
        """

        self.linejunk = linejunk
        self.charjunk = charjunk

    def compare(self, a, b):
        r"""
        Compare two sequences of lines; generate the resulting delta.

        Each sequence must contain individual single-line strings ending with
        newlines. Such sequences can be obtained from the `readlines()` method
        of file-like objects.  The delta generated also consists of newline-
        terminated strings, ready to be printed as-is via the writelines()
        method of a file-like object.

        Example:

        >>> print(''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(True),
        ...                                'ore\ntree\nemu\n'.splitlines(True))),
        ...       end="")
        - one
        ?  ^
        + ore
        ?  ^
        - two
        - three
        ?  -
        + tree
        + emu
        """

        # Line-level opcodes drive which dump/replace strategy is used;
        # 'replace' gets the expensive intraline treatment.
        cruncher = SequenceMatcher(self.linejunk, a, b)
        for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
            if tag == 'replace':
                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
            elif tag == 'delete':
                g = self._dump('-', a, alo, ahi)
            elif tag == 'insert':
                g = self._dump('+', b, blo, bhi)
            elif tag == 'equal':
                g = self._dump(' ', a, alo, ahi)
            else:
                raise ValueError('unknown tag %r' % (tag,))

            yield from g

    def _dump(self, tag, x, lo, hi):
        """Generate comparison results for a same-tagged range."""
        for i in range(lo, hi):
            yield '%s %s' % (tag, x[i])

    def _plain_replace(self, a, alo, ahi, b, blo, bhi):
        """Dump a replaced block without intraline '?' markup."""
        assert alo < ahi and blo < bhi
        # dump the shorter block first -- reduces the burden on short-term
        # memory if the blocks are of very different sizes
        if bhi - blo < ahi - alo:
            first  = self._dump('+', b, blo, bhi)
            second = self._dump('-', a, alo, ahi)
        else:
            first  = self._dump('-', a, alo, ahi)
            second = self._dump('+', b, blo, bhi)

        for g in first, second:
            yield from g

    def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
        r"""
        When replacing one block of lines with another, search the blocks
        for *similar* lines; the best-matching pair (if any) is used as a
        synch point, and intraline difference marking is done on the
        similar pair. Lots of work, but often worth it.

        Example:

        >>> d = Differ()
        >>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
        ...                            ['abcdefGhijkl\n'], 0, 1)
        >>> print(''.join(results), end="")
        - abcDefghiJkl
        ?    ^  ^  ^
        + abcdefGhijkl
        ?    ^  ^  ^
        """

        # don't synch up unless the lines have a similarity score of at
        # least cutoff; best_ratio tracks the best score seen so far
        # (starts just below cutoff so only scores >= cutoff can win)
        best_ratio, cutoff = 0.74, 0.75
        cruncher = SequenceMatcher(self.charjunk)
        eqi, eqj = None, None   # 1st indices of equal lines (if any)

        # search for the pair that matches best without being identical
        # (identical lines must be junk lines, & we don't want to synch up
        # on junk -- unless we have to)
        for j in range(blo, bhi):
            bj = b[j]
            cruncher.set_seq2(bj)
            for i in range(alo, ahi):
                ai = a[i]
                if ai == bj:
                    if eqi is None:
                        eqi, eqj = i, j
                    continue
                cruncher.set_seq1(ai)
                # computing similarity is expensive, so use the quick
                # upper bounds first -- have seen this speed up messy
                # compares by a factor of 3.
                # note that ratio() is only expensive to compute the first
                # time it's called on a sequence pair; the expensive part
                # of the computation is cached by cruncher
                if cruncher.real_quick_ratio() > best_ratio and \
                      cruncher.quick_ratio() > best_ratio and \
                      cruncher.ratio() > best_ratio:
                    best_ratio, best_i, best_j = cruncher.ratio(), i, j
        # NOTE: best_i/best_j are guaranteed bound below -- either the
        # ratio exceeded cutoff (and they were set in the loop) or they
        # are assigned from eqi/eqj here.
        if best_ratio < cutoff:
            # no non-identical "pretty close" pair
            if eqi is None:
                # no identical pair either -- treat it as a straight replace
                yield from self._plain_replace(a, alo, ahi, b, blo, bhi)
                return
            # no close pair, but an identical pair -- synch up on that
            best_i, best_j, best_ratio = eqi, eqj, 1.0
        else:
            # there's a close pair, so forget the identical pair (if any)
            eqi = None

        # a[best_i] very similar to b[best_j]; eqi is None iff they're not
        # identical

        # pump out diffs from before the synch point
        yield from self._fancy_helper(a, alo, best_i, b, blo, best_j)

        # do intraline marking on the synch pair
        aelt, belt = a[best_i], b[best_j]
        if eqi is None:
            # pump out a '-', '?', '+', '?' quad for the synched lines
            atags = btags = ""
            cruncher.set_seqs(aelt, belt)
            for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
                la, lb = ai2 - ai1, bj2 - bj1
                if tag == 'replace':
                    atags += '^' * la
                    btags += '^' * lb
                elif tag == 'delete':
                    atags += '-' * la
                elif tag == 'insert':
                    btags += '+' * lb
                elif tag == 'equal':
                    atags += ' ' * la
                    btags += ' ' * lb
                else:
                    raise ValueError('unknown tag %r' % (tag,))
            yield from self._qformat(aelt, belt, atags, btags)
        else:
            # the synch pair is identical
            yield '  ' + aelt

        # pump out diffs from after the synch point
        yield from self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi)

    def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
        """Recurse into _fancy_replace / _dump for a leftover sub-range."""
        g = []
        if alo < ahi:
            if blo < bhi:
                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
            else:
                g = self._dump('-', a, alo, ahi)
        elif blo < bhi:
            g = self._dump('+', b, blo, bhi)

        yield from g

    def _qformat(self, aline, bline, atags, btags):
        r"""
        Format "?" output and deal with tabs.

        Example:

        >>> d = Differ()
        >>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
        ...                      '  ^ ^  ^      ', '  ^ ^  ^      ')
        >>> for line in results: print(repr(line))
        ...
        '- \tabcDefghiJkl\n'
        '? \t ^ ^  ^\n'
        '+ \tabcdefGhijkl\n'
        '? \t ^ ^  ^\n'
        """
        # Can hurt, but will probably help most of the time.
        atags = _keep_original_ws(aline, atags).rstrip()
        btags = _keep_original_ws(bline, btags).rstrip()

        yield "- " + aline
        if atags:
            yield f"? {atags}\n"

        yield "+ " + bline
        if btags:
            yield f"? {btags}\n"
|
| 1025 |
+
|
| 1026 |
+
# With respect to junk, an earlier version of ndiff simply refused to
|
| 1027 |
+
# *start* a match with a junk element. The result was cases like this:
|
| 1028 |
+
# before: private Thread currentThread;
|
| 1029 |
+
# after: private volatile Thread currentThread;
|
| 1030 |
+
# If you consider whitespace to be junk, the longest contiguous match
|
| 1031 |
+
# not starting with junk is "e Thread currentThread". So ndiff reported
|
| 1032 |
+
# that "e volatil" was inserted between the 't' and the 'e' in "private".
|
| 1033 |
+
# While an accurate view, to people that's absurd. The current version
|
| 1034 |
+
# looks for matching blocks that are entirely junk-free, then extends the
|
| 1035 |
+
# longest one of those as far as possible but only with matching junk.
|
| 1036 |
+
# So now "currentThread" is matched, then extended to suck up the
|
| 1037 |
+
# preceding blank; then "private" is matched, and extended to suck up the
|
| 1038 |
+
# following blank; then "Thread" is matched; and finally ndiff reports
|
| 1039 |
+
# that "volatile " was inserted before "Thread". The only quibble
|
| 1040 |
+
# remaining is that perhaps it was really the case that " volatile"
|
| 1041 |
+
# was inserted after "private". I can live with that <wink>.
|
| 1042 |
+
|
| 1043 |
+
import re
|
| 1044 |
+
|
| 1045 |
+
def IS_LINE_JUNK(line, pat=re.compile(r"\s*(?:#\s*)?$").match):
    r"""
    Return True for ignorable line: iff `line` is blank or contains a single '#'.

    The matcher is bound once at definition time through the `pat` default
    argument, so the regex is compiled only once.

    Examples:

    >>> IS_LINE_JUNK('\n')
    True
    >>> IS_LINE_JUNK('  #   \n')
    True
    >>> IS_LINE_JUNK('hello\n')
    False
    """

    match = pat(line)
    return match is not None
|
| 1060 |
+
|
| 1061 |
+
def IS_CHARACTER_JUNK(ch, ws=" \t"):
    r"""
    Return True for ignorable character: iff `ch` is a space or tab.

    Examples:

    >>> IS_CHARACTER_JUNK(' ')
    True
    >>> IS_CHARACTER_JUNK('\t')
    True
    >>> IS_CHARACTER_JUNK('\n')
    False
    >>> IS_CHARACTER_JUNK('x')
    False
    """

    # str.find substring search is equivalent to ``ch in ws`` here.
    return ws.find(ch) != -1
|
| 1078 |
+
|
| 1079 |
+
|
| 1080 |
+
########################################################################
|
| 1081 |
+
### Unified Diff
|
| 1082 |
+
########################################################################
|
| 1083 |
+
|
| 1084 |
+
def _format_range_unified(start, stop):
|
| 1085 |
+
'Convert range to the "ed" format'
|
| 1086 |
+
# Per the diff spec at http://www.unix.org/single_unix_specification/
|
| 1087 |
+
beginning = start + 1 # lines start numbering with one
|
| 1088 |
+
length = stop - start
|
| 1089 |
+
if length == 1:
|
| 1090 |
+
return '{}'.format(beginning)
|
| 1091 |
+
if not length:
|
| 1092 |
+
beginning -= 1 # empty ranges begin at line just before the range
|
| 1093 |
+
return '{},{}'.format(beginning, length)
|
| 1094 |
+
|
| 1095 |
+
def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
                 tofiledate='', n=3, lineterm='\n'):
    r"""
    Compare two sequences of lines; generate the delta as a unified diff.

    Unified diffs are a compact way of showing line changes and a few
    lines of context.  The number of context lines is set by 'n' which
    defaults to three.

    By default, the diff control lines (those with ---, +++, or @@) are
    created with a trailing newline.  This is helpful so that inputs
    created from file.readlines() result in diffs that are suitable for
    file.writelines() since both the inputs and outputs have trailing
    newlines.

    For inputs that do not have trailing newlines, set the lineterm
    argument to "" so that the output will be uniformly newline free.

    The unidiff format normally has a header for filenames and modification
    times.  Any or all of these may be specified using strings for
    'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
    The modification times are normally expressed in the ISO 8601 format.

    Example:

    >>> for line in unified_diff('one two three four'.split(),
    ...             'zero one tree four'.split(), 'Original', 'Current',
    ...             '2005-01-26 23:30:50', '2010-04-02 10:20:52',
    ...             lineterm=''):
    ...     print(line)                 # doctest: +NORMALIZE_WHITESPACE
    --- Original        2005-01-26 23:30:50
    +++ Current         2010-04-02 10:20:52
    @@ -1,4 +1,4 @@
    +zero
     one
    -two
    -three
    +tree
     four
    """

    _check_types(a, b, fromfile, tofile, fromfiledate, tofiledate, lineterm)
    first_group = True
    for group in SequenceMatcher(None, a, b).get_grouped_opcodes(n):
        # The file header is emitted lazily, just before the first hunk,
        # so identical inputs yield nothing at all.
        if first_group:
            first_group = False
            fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
            todate = '\t{}'.format(tofiledate) if tofiledate else ''
            yield '--- {}{}{}'.format(fromfile, fromdate, lineterm)
            yield '+++ {}{}{}'.format(tofile, todate, lineterm)

        head, tail = group[0], group[-1]
        yield '@@ -{} +{} @@{}'.format(
            _format_range_unified(head[1], tail[2]),
            _format_range_unified(head[3], tail[4]),
            lineterm)

        for tag, i1, i2, j1, j2 in group:
            if tag == 'equal':
                for line in a[i1:i2]:
                    yield ' ' + line
            else:
                # 'replace' and 'delete' emit removed lines ...
                if tag != 'insert':
                    for line in a[i1:i2]:
                        yield '-' + line
                # ... 'replace' and 'insert' emit added lines.
                if tag != 'delete':
                    for line in b[j1:j2]:
                        yield '+' + line
|
| 1162 |
+
|
| 1163 |
+
|
| 1164 |
+
########################################################################
|
| 1165 |
+
### Context Diff
|
| 1166 |
+
########################################################################
|
| 1167 |
+
|
| 1168 |
+
def _format_range_context(start, stop):
|
| 1169 |
+
'Convert range to the "ed" format'
|
| 1170 |
+
# Per the diff spec at http://www.unix.org/single_unix_specification/
|
| 1171 |
+
beginning = start + 1 # lines start numbering with one
|
| 1172 |
+
length = stop - start
|
| 1173 |
+
if not length:
|
| 1174 |
+
beginning -= 1 # empty ranges begin at line just before the range
|
| 1175 |
+
if length <= 1:
|
| 1176 |
+
return '{}'.format(beginning)
|
| 1177 |
+
return '{},{}'.format(beginning, beginning + length - 1)
|
| 1178 |
+
|
| 1179 |
+
# See http://www.unix.org/single_unix_specification/
|
| 1180 |
+
def context_diff(a, b, fromfile='', tofile='',
                 fromfiledate='', tofiledate='', n=3, lineterm='\n'):
    r"""
    Compare two sequences of lines; generate the delta as a context diff.

    Context diffs are a compact way of showing line changes and a few
    lines of context.  The number of context lines is set by 'n' which
    defaults to three.

    By default, the diff control lines (those with *** or ---) are
    created with a trailing newline.  This is helpful so that inputs
    created from file.readlines() result in diffs that are suitable for
    file.writelines() since both the inputs and outputs have trailing
    newlines.

    For inputs that do not have trailing newlines, set the lineterm
    argument to "" so that the output will be uniformly newline free.

    The context diff format normally has a header for filenames and
    modification times.  Any or all of these may be specified using
    strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
    The modification times are normally expressed in the ISO 8601 format.
    If not specified, the strings default to blanks.

    Example:

    >>> print(''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(True),
    ...       'zero\none\ntree\nfour\n'.splitlines(True), 'Original', 'Current')),
    ...       end="")
    *** Original
    --- Current
    ***************
    *** 1,4 ****
      one
    ! two
    ! three
      four
    --- 1,4 ----
    + zero
      one
    ! tree
      four
    """

    _check_types(a, b, fromfile, tofile, fromfiledate, tofiledate, lineterm)
    prefix = dict(insert='+ ', delete='- ', replace='! ', equal='  ')
    first_group = True
    for group in SequenceMatcher(None, a, b).get_grouped_opcodes(n):
        # File header is emitted lazily before the first hunk only.
        if first_group:
            first_group = False
            fromdate = '\t{}'.format(fromfiledate) if fromfiledate else ''
            todate = '\t{}'.format(tofiledate) if tofiledate else ''
            yield '*** {}{}{}'.format(fromfile, fromdate, lineterm)
            yield '--- {}{}{}'.format(tofile, todate, lineterm)

        head, tail = group[0], group[-1]
        yield '***************' + lineterm

        yield '*** {} ****{}'.format(
            _format_range_context(head[1], tail[2]), lineterm)

        # The "from" half is printed only if something was removed/changed.
        if any(tag in {'replace', 'delete'} for tag, _, _, _, _ in group):
            for tag, i1, i2, _, _ in group:
                if tag != 'insert':
                    for line in a[i1:i2]:
                        yield prefix[tag] + line

        yield '--- {} ----{}'.format(
            _format_range_context(head[3], tail[4]), lineterm)

        # The "to" half is printed only if something was added/changed.
        if any(tag in {'replace', 'insert'} for tag, _, _, _, _ in group):
            for tag, _, _, j1, j2 in group:
                if tag != 'delete':
                    for line in b[j1:j2]:
                        yield prefix[tag] + line
|
| 1255 |
+
|
| 1256 |
+
def _check_types(a, b, *args):
|
| 1257 |
+
# Checking types is weird, but the alternative is garbled output when
|
| 1258 |
+
# someone passes mixed bytes and str to {unified,context}_diff(). E.g.
|
| 1259 |
+
# without this check, passing filenames as bytes results in output like
|
| 1260 |
+
# --- b'oldfile.txt'
|
| 1261 |
+
# +++ b'newfile.txt'
|
| 1262 |
+
# because of how str.format() incorporates bytes objects.
|
| 1263 |
+
if a and not isinstance(a[0], str):
|
| 1264 |
+
raise TypeError('lines to compare must be str, not %s (%r)' %
|
| 1265 |
+
(type(a[0]).__name__, a[0]))
|
| 1266 |
+
if b and not isinstance(b[0], str):
|
| 1267 |
+
raise TypeError('lines to compare must be str, not %s (%r)' %
|
| 1268 |
+
(type(b[0]).__name__, b[0]))
|
| 1269 |
+
for arg in args:
|
| 1270 |
+
if not isinstance(arg, str):
|
| 1271 |
+
raise TypeError('all arguments must be str, not: %r' % (arg,))
|
| 1272 |
+
|
| 1273 |
+
def diff_bytes(dfunc, a, b, fromfile=b'', tofile=b'',
               fromfiledate=b'', tofiledate=b'', n=3, lineterm=b'\n'):
    r"""
    Compare `a` and `b`, two sequences of lines represented as bytes rather
    than str. This is a wrapper for `dfunc`, which is typically either
    unified_diff() or context_diff(). Inputs are losslessly converted to
    strings so that `dfunc` only has to worry about strings, and encoded
    back to bytes on return. This is necessary to compare files with
    unknown or inconsistent encoding. All other inputs (except `n`) must be
    bytes rather than str.
    """
    def decode(s):
        # 'surrogateescape' round-trips arbitrary bytes through str losslessly.
        try:
            return s.decode('ascii', 'surrogateescape')
        except AttributeError as err:
            msg = ('all arguments must be bytes, not %s (%r)' %
                   (type(s).__name__, s))
            raise TypeError(msg) from err

    a = [decode(line) for line in a]
    b = [decode(line) for line in b]
    fromfile, tofile, fromfiledate, tofiledate, lineterm = map(
        decode, (fromfile, tofile, fromfiledate, tofiledate, lineterm))

    for line in dfunc(a, b, fromfile, tofile, fromfiledate, tofiledate,
                      n, lineterm):
        yield line.encode('ascii', 'surrogateescape')
|
| 1302 |
+
|
| 1303 |
+
def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
    r"""
    Compare `a` and `b` (lists of strings); return a `Differ`-style delta.

    Optional keyword parameters `linejunk` and `charjunk` are for filter
    functions, or can be None:

    - linejunk: A function that should accept a single string argument and
      return true iff the string is junk.  The default is None, and is
      recommended; the underlying SequenceMatcher class has an adaptive
      notion of "noise" lines.

    - charjunk: A function that accepts a character (string of length
      1), and returns true iff the character is junk.  The default is
      the module-level function IS_CHARACTER_JUNK, which filters out
      whitespace characters (a blank or tab; note: it's a bad idea to
      include newline in this!).

    Tools/scripts/ndiff.py is a command-line front-end to this function.

    Example:

    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
    ...              'ore\ntree\nemu\n'.splitlines(keepends=True))
    >>> print(''.join(diff), end="")
    - one
    ?  ^
    + ore
    ?  ^
    - two
    - three
    ?  -
    + tree
    + emu
    """
    # Thin convenience wrapper: build the differencer, hand back its
    # lazy delta generator.
    differ = Differ(linejunk, charjunk)
    return differ.compare(a, b)
|
| 1339 |
+
|
| 1340 |
+
def _mdiff(fromlines, tolines, context=None, linejunk=None,
|
| 1341 |
+
charjunk=IS_CHARACTER_JUNK):
|
| 1342 |
+
r"""Returns generator yielding marked up from/to side by side differences.
|
| 1343 |
+
|
| 1344 |
+
Arguments:
|
| 1345 |
+
fromlines -- list of text lines to compared to tolines
|
| 1346 |
+
tolines -- list of text lines to be compared to fromlines
|
| 1347 |
+
context -- number of context lines to display on each side of difference,
|
| 1348 |
+
if None, all from/to text lines will be generated.
|
| 1349 |
+
linejunk -- passed on to ndiff (see ndiff documentation)
|
| 1350 |
+
charjunk -- passed on to ndiff (see ndiff documentation)
|
| 1351 |
+
|
| 1352 |
+
This function returns an iterator which returns a tuple:
|
| 1353 |
+
(from line tuple, to line tuple, boolean flag)
|
| 1354 |
+
|
| 1355 |
+
from/to line tuple -- (line num, line text)
|
| 1356 |
+
line num -- integer or None (to indicate a context separation)
|
| 1357 |
+
line text -- original line text with following markers inserted:
|
| 1358 |
+
'\0+' -- marks start of added text
|
| 1359 |
+
'\0-' -- marks start of deleted text
|
| 1360 |
+
'\0^' -- marks start of changed text
|
| 1361 |
+
'\1' -- marks end of added/deleted/changed text
|
| 1362 |
+
|
| 1363 |
+
boolean flag -- None indicates context separation, True indicates
|
| 1364 |
+
either "from" or "to" line contains a change, otherwise False.
|
| 1365 |
+
|
| 1366 |
+
This function/iterator was originally developed to generate side by side
|
| 1367 |
+
file difference for making HTML pages (see HtmlDiff class for example
|
| 1368 |
+
usage).
|
| 1369 |
+
|
| 1370 |
+
Note, this function utilizes the ndiff function to generate the side by
|
| 1371 |
+
side difference markup. Optional ndiff arguments may be passed to this
|
| 1372 |
+
function and they in turn will be passed to ndiff.
|
| 1373 |
+
"""
|
| 1374 |
+
import re
|
| 1375 |
+
|
| 1376 |
+
# regular expression for finding intraline change indices
|
| 1377 |
+
change_re = re.compile(r'(\++|\-+|\^+)')
|
| 1378 |
+
|
| 1379 |
+
# create the difference iterator to generate the differences
|
| 1380 |
+
diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
|
| 1381 |
+
|
| 1382 |
+
def _make_line(lines, format_key, side, num_lines=[0,0]):
|
| 1383 |
+
"""Returns line of text with user's change markup and line formatting.
|
| 1384 |
+
|
| 1385 |
+
lines -- list of lines from the ndiff generator to produce a line of
|
| 1386 |
+
text from. When producing the line of text to return, the
|
| 1387 |
+
lines used are removed from this list.
|
| 1388 |
+
format_key -- '+' return first line in list with "add" markup around
|
| 1389 |
+
the entire line.
|
| 1390 |
+
'-' return first line in list with "delete" markup around
|
| 1391 |
+
the entire line.
|
| 1392 |
+
'?' return first line in list with add/delete/change
|
| 1393 |
+
intraline markup (indices obtained from second line)
|
| 1394 |
+
None return first line in list with no markup
|
| 1395 |
+
side -- indice into the num_lines list (0=from,1=to)
|
| 1396 |
+
num_lines -- from/to current line number. This is NOT intended to be a
|
| 1397 |
+
passed parameter. It is present as a keyword argument to
|
| 1398 |
+
maintain memory of the current line numbers between calls
|
| 1399 |
+
of this function.
|
| 1400 |
+
|
| 1401 |
+
Note, this function is purposefully not defined at the module scope so
|
| 1402 |
+
that data it needs from its parent function (within whose context it
|
| 1403 |
+
is defined) does not need to be of module scope.
|
| 1404 |
+
"""
|
| 1405 |
+
num_lines[side] += 1
|
| 1406 |
+
# Handle case where no user markup is to be added, just return line of
|
| 1407 |
+
# text with user's line format to allow for usage of the line number.
|
| 1408 |
+
if format_key is None:
|
| 1409 |
+
return (num_lines[side],lines.pop(0)[2:])
|
| 1410 |
+
# Handle case of intraline changes
|
| 1411 |
+
if format_key == '?':
|
| 1412 |
+
text, markers = lines.pop(0), lines.pop(0)
|
| 1413 |
+
# find intraline changes (store change type and indices in tuples)
|
| 1414 |
+
sub_info = []
|
| 1415 |
+
def record_sub_info(match_object,sub_info=sub_info):
|
| 1416 |
+
sub_info.append([match_object.group(1)[0],match_object.span()])
|
| 1417 |
+
return match_object.group(1)
|
| 1418 |
+
change_re.sub(record_sub_info,markers)
|
| 1419 |
+
# process each tuple inserting our special marks that won't be
|
| 1420 |
+
# noticed by an xml/html escaper.
|
| 1421 |
+
for key,(begin,end) in reversed(sub_info):
|
| 1422 |
+
text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
|
| 1423 |
+
text = text[2:]
|
| 1424 |
+
# Handle case of add/delete entire line
|
| 1425 |
+
else:
|
| 1426 |
+
text = lines.pop(0)[2:]
|
| 1427 |
+
# if line of text is just a newline, insert a space so there is
|
| 1428 |
+
# something for the user to highlight and see.
|
| 1429 |
+
if not text:
|
| 1430 |
+
text = ' '
|
| 1431 |
+
# insert marks that won't be noticed by an xml/html escaper.
|
| 1432 |
+
text = '\0' + format_key + text + '\1'
|
| 1433 |
+
# Return line of text, first allow user's line formatter to do its
|
| 1434 |
+
# thing (such as adding the line number) then replace the special
|
| 1435 |
+
# marks with what the user's change markup.
|
| 1436 |
+
return (num_lines[side],text)
|
| 1437 |
+
|
| 1438 |
+
def _line_iterator():
|
| 1439 |
+
"""Yields from/to lines of text with a change indication.
|
| 1440 |
+
|
| 1441 |
+
This function is an iterator. It itself pulls lines from a
|
| 1442 |
+
differencing iterator, processes them and yields them. When it can
|
| 1443 |
+
it yields both a "from" and a "to" line, otherwise it will yield one
|
| 1444 |
+
or the other. In addition to yielding the lines of from/to text, a
|
| 1445 |
+
boolean flag is yielded to indicate if the text line(s) have
|
| 1446 |
+
differences in them.
|
| 1447 |
+
|
| 1448 |
+
Note, this function is purposefully not defined at the module scope so
|
| 1449 |
+
that data it needs from its parent function (within whose context it
|
| 1450 |
+
is defined) does not need to be of module scope.
|
| 1451 |
+
"""
|
| 1452 |
+
lines = []
|
| 1453 |
+
num_blanks_pending, num_blanks_to_yield = 0, 0
|
| 1454 |
+
while True:
|
| 1455 |
+
# Load up next 4 lines so we can look ahead, create strings which
|
| 1456 |
+
# are a concatenation of the first character of each of the 4 lines
|
| 1457 |
+
# so we can do some very readable comparisons.
|
| 1458 |
+
while len(lines) < 4:
|
| 1459 |
+
lines.append(next(diff_lines_iterator, 'X'))
|
| 1460 |
+
s = ''.join([line[0] for line in lines])
|
| 1461 |
+
if s.startswith('X'):
|
| 1462 |
+
# When no more lines, pump out any remaining blank lines so the
|
| 1463 |
+
# corresponding add/delete lines get a matching blank line so
|
| 1464 |
+
# all line pairs get yielded at the next level.
|
| 1465 |
+
num_blanks_to_yield = num_blanks_pending
|
| 1466 |
+
elif s.startswith('-?+?'):
|
| 1467 |
+
# simple intraline change
|
| 1468 |
+
yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
|
| 1469 |
+
continue
|
| 1470 |
+
elif s.startswith('--++'):
|
| 1471 |
+
# in delete block, add block coming: we do NOT want to get
|
| 1472 |
+
# caught up on blank lines yet, just process the delete line
|
| 1473 |
+
num_blanks_pending -= 1
|
| 1474 |
+
yield _make_line(lines,'-',0), None, True
|
| 1475 |
+
continue
|
| 1476 |
+
elif s.startswith(('--?+', '--+', '- ')):
|
| 1477 |
+
# in delete block and see an intraline change or unchanged line
|
| 1478 |
+
# coming: yield the delete line and then blanks
|
| 1479 |
+
from_line,to_line = _make_line(lines,'-',0), None
|
| 1480 |
+
num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
|
| 1481 |
+
elif s.startswith('-+?'):
|
| 1482 |
+
# intraline change
|
| 1483 |
+
yield _make_line(lines,None,0), _make_line(lines,'?',1), True
|
| 1484 |
+
continue
|
| 1485 |
+
elif s.startswith('-?+'):
|
| 1486 |
+
# intraline change
|
| 1487 |
+
yield _make_line(lines,'?',0), _make_line(lines,None,1), True
|
| 1488 |
+
continue
|
| 1489 |
+
elif s.startswith('-'):
|
| 1490 |
+
# delete FROM line
|
| 1491 |
+
num_blanks_pending -= 1
|
| 1492 |
+
yield _make_line(lines,'-',0), None, True
|
| 1493 |
+
continue
|
| 1494 |
+
elif s.startswith('+--'):
|
| 1495 |
+
# in add block, delete block coming: we do NOT want to get
|
| 1496 |
+
# caught up on blank lines yet, just process the add line
|
| 1497 |
+
num_blanks_pending += 1
|
| 1498 |
+
yield None, _make_line(lines,'+',1), True
|
| 1499 |
+
continue
|
| 1500 |
+
elif s.startswith(('+ ', '+-')):
|
| 1501 |
+
# will be leaving an add block: yield blanks then add line
|
| 1502 |
+
from_line, to_line = None, _make_line(lines,'+',1)
|
| 1503 |
+
num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
|
| 1504 |
+
elif s.startswith('+'):
|
| 1505 |
+
# inside an add block, yield the add line
|
| 1506 |
+
num_blanks_pending += 1
|
| 1507 |
+
yield None, _make_line(lines,'+',1), True
|
| 1508 |
+
continue
|
| 1509 |
+
elif s.startswith(' '):
|
| 1510 |
+
# unchanged text, yield it to both sides
|
| 1511 |
+
yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
|
| 1512 |
+
continue
|
| 1513 |
+
# Catch up on the blank lines so when we yield the next from/to
|
| 1514 |
+
# pair, they are lined up.
|
| 1515 |
+
while(num_blanks_to_yield < 0):
|
| 1516 |
+
num_blanks_to_yield += 1
|
| 1517 |
+
yield None,('','\n'),True
|
| 1518 |
+
while(num_blanks_to_yield > 0):
|
| 1519 |
+
num_blanks_to_yield -= 1
|
| 1520 |
+
yield ('','\n'),None,True
|
| 1521 |
+
if s.startswith('X'):
|
| 1522 |
+
return
|
| 1523 |
+
else:
|
| 1524 |
+
yield from_line,to_line,True
|
| 1525 |
+
|
| 1526 |
+
def _line_pair_iterator():
|
| 1527 |
+
"""Yields from/to lines of text with a change indication.
|
| 1528 |
+
|
| 1529 |
+
This function is an iterator. It itself pulls lines from the line
|
| 1530 |
+
iterator. Its difference from that iterator is that this function
|
| 1531 |
+
always yields a pair of from/to text lines (with the change
|
| 1532 |
+
indication). If necessary it will collect single from/to lines
|
| 1533 |
+
until it has a matching pair from/to pair to yield.
|
| 1534 |
+
|
| 1535 |
+
Note, this function is purposefully not defined at the module scope so
|
| 1536 |
+
that data it needs from its parent function (within whose context it
|
| 1537 |
+
is defined) does not need to be of module scope.
|
| 1538 |
+
"""
|
| 1539 |
+
line_iterator = _line_iterator()
|
| 1540 |
+
fromlines,tolines=[],[]
|
| 1541 |
+
while True:
|
| 1542 |
+
# Collecting lines of text until we have a from/to pair
|
| 1543 |
+
while (len(fromlines)==0 or len(tolines)==0):
|
| 1544 |
+
try:
|
| 1545 |
+
from_line, to_line, found_diff = next(line_iterator)
|
| 1546 |
+
except StopIteration:
|
| 1547 |
+
return
|
| 1548 |
+
if from_line is not None:
|
| 1549 |
+
fromlines.append((from_line,found_diff))
|
| 1550 |
+
if to_line is not None:
|
| 1551 |
+
tolines.append((to_line,found_diff))
|
| 1552 |
+
# Once we have a pair, remove them from the collection and yield it
|
| 1553 |
+
from_line, fromDiff = fromlines.pop(0)
|
| 1554 |
+
to_line, to_diff = tolines.pop(0)
|
| 1555 |
+
yield (from_line,to_line,fromDiff or to_diff)
|
| 1556 |
+
|
| 1557 |
+
# Handle case where user does not want context differencing, just yield
|
| 1558 |
+
# them up without doing anything else with them.
|
| 1559 |
+
line_pair_iterator = _line_pair_iterator()
|
| 1560 |
+
if context is None:
|
| 1561 |
+
yield from line_pair_iterator
|
| 1562 |
+
# Handle case where user wants context differencing. We must do some
|
| 1563 |
+
# storage of lines until we know for sure that they are to be yielded.
|
| 1564 |
+
else:
|
| 1565 |
+
context += 1
|
| 1566 |
+
lines_to_write = 0
|
| 1567 |
+
while True:
|
| 1568 |
+
# Store lines up until we find a difference, note use of a
|
| 1569 |
+
# circular queue because we only need to keep around what
|
| 1570 |
+
# we need for context.
|
| 1571 |
+
index, contextLines = 0, [None]*(context)
|
| 1572 |
+
found_diff = False
|
| 1573 |
+
while(found_diff is False):
|
| 1574 |
+
try:
|
| 1575 |
+
from_line, to_line, found_diff = next(line_pair_iterator)
|
| 1576 |
+
except StopIteration:
|
| 1577 |
+
return
|
| 1578 |
+
i = index % context
|
| 1579 |
+
contextLines[i] = (from_line, to_line, found_diff)
|
| 1580 |
+
index += 1
|
| 1581 |
+
# Yield lines that we have collected so far, but first yield
|
| 1582 |
+
# the user's separator.
|
| 1583 |
+
if index > context:
|
| 1584 |
+
yield None, None, None
|
| 1585 |
+
lines_to_write = context
|
| 1586 |
+
else:
|
| 1587 |
+
lines_to_write = index
|
| 1588 |
+
index = 0
|
| 1589 |
+
while(lines_to_write):
|
| 1590 |
+
i = index % context
|
| 1591 |
+
index += 1
|
| 1592 |
+
yield contextLines[i]
|
| 1593 |
+
lines_to_write -= 1
|
| 1594 |
+
# Now yield the context lines after the change
|
| 1595 |
+
lines_to_write = context-1
|
| 1596 |
+
try:
|
| 1597 |
+
while(lines_to_write):
|
| 1598 |
+
from_line, to_line, found_diff = next(line_pair_iterator)
|
| 1599 |
+
# If another change within the context, extend the context
|
| 1600 |
+
if found_diff:
|
| 1601 |
+
lines_to_write = context-1
|
| 1602 |
+
else:
|
| 1603 |
+
lines_to_write -= 1
|
| 1604 |
+
yield from_line, to_line, found_diff
|
| 1605 |
+
except StopIteration:
|
| 1606 |
+
# Catch exception from next() and return normally
|
| 1607 |
+
return
|
| 1608 |
+
|
| 1609 |
+
|
| 1610 |
+
_file_template = """
|
| 1611 |
+
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
|
| 1612 |
+
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
|
| 1613 |
+
|
| 1614 |
+
<html>
|
| 1615 |
+
|
| 1616 |
+
<head>
|
| 1617 |
+
<meta http-equiv="Content-Type"
|
| 1618 |
+
content="text/html; charset=%(charset)s" />
|
| 1619 |
+
<title></title>
|
| 1620 |
+
<style type="text/css">%(styles)s
|
| 1621 |
+
</style>
|
| 1622 |
+
</head>
|
| 1623 |
+
|
| 1624 |
+
<body>
|
| 1625 |
+
%(table)s%(legend)s
|
| 1626 |
+
</body>
|
| 1627 |
+
|
| 1628 |
+
</html>"""
|
| 1629 |
+
|
| 1630 |
+
_styles = """
|
| 1631 |
+
table.diff {font-family:Courier; border:medium;}
|
| 1632 |
+
.diff_header {background-color:#e0e0e0}
|
| 1633 |
+
td.diff_header {text-align:right}
|
| 1634 |
+
.diff_next {background-color:#c0c0c0}
|
| 1635 |
+
.diff_add {background-color:#aaffaa}
|
| 1636 |
+
.diff_chg {background-color:#ffff77}
|
| 1637 |
+
.diff_sub {background-color:#ffaaaa}"""
|
| 1638 |
+
|
| 1639 |
+
_table_template = """
|
| 1640 |
+
<table class="diff" id="difflib_chg_%(prefix)s_top"
|
| 1641 |
+
cellspacing="0" cellpadding="0" rules="groups" >
|
| 1642 |
+
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
|
| 1643 |
+
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
|
| 1644 |
+
%(header_row)s
|
| 1645 |
+
<tbody>
|
| 1646 |
+
%(data_rows)s </tbody>
|
| 1647 |
+
</table>"""
|
| 1648 |
+
|
| 1649 |
+
_legend = """
|
| 1650 |
+
<table class="diff" summary="Legends">
|
| 1651 |
+
<tr> <th colspan="2"> Legends </th> </tr>
|
| 1652 |
+
<tr> <td> <table border="" summary="Colors">
|
| 1653 |
+
<tr><th> Colors </th> </tr>
|
| 1654 |
+
<tr><td class="diff_add"> Added </td></tr>
|
| 1655 |
+
<tr><td class="diff_chg">Changed</td> </tr>
|
| 1656 |
+
<tr><td class="diff_sub">Deleted</td> </tr>
|
| 1657 |
+
</table></td>
|
| 1658 |
+
<td> <table border="" summary="Links">
|
| 1659 |
+
<tr><th colspan="2"> Links </th> </tr>
|
| 1660 |
+
<tr><td>(f)irst change</td> </tr>
|
| 1661 |
+
<tr><td>(n)ext change</td> </tr>
|
| 1662 |
+
<tr><td>(t)op</td> </tr>
|
| 1663 |
+
</table></td> </tr>
|
| 1664 |
+
</table>"""
|
| 1665 |
+
|
| 1666 |
+
class HtmlDiff(object):
|
| 1667 |
+
"""For producing HTML side by side comparison with change highlights.
|
| 1668 |
+
|
| 1669 |
+
This class can be used to create an HTML table (or a complete HTML file
|
| 1670 |
+
containing the table) showing a side by side, line by line comparison
|
| 1671 |
+
of text with inter-line and intra-line change highlights. The table can
|
| 1672 |
+
be generated in either full or contextual difference mode.
|
| 1673 |
+
|
| 1674 |
+
The following methods are provided for HTML generation:
|
| 1675 |
+
|
| 1676 |
+
make_table -- generates HTML for a single side by side table
|
| 1677 |
+
make_file -- generates complete HTML file with a single side by side table
|
| 1678 |
+
|
| 1679 |
+
See tools/scripts/diff.py for an example usage of this class.
|
| 1680 |
+
"""
|
| 1681 |
+
|
| 1682 |
+
_file_template = _file_template
|
| 1683 |
+
_styles = _styles
|
| 1684 |
+
_table_template = _table_template
|
| 1685 |
+
_legend = _legend
|
| 1686 |
+
_default_prefix = 0
|
| 1687 |
+
|
| 1688 |
+
def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
|
| 1689 |
+
charjunk=IS_CHARACTER_JUNK):
|
| 1690 |
+
"""HtmlDiff instance initializer
|
| 1691 |
+
|
| 1692 |
+
Arguments:
|
| 1693 |
+
tabsize -- tab stop spacing, defaults to 8.
|
| 1694 |
+
wrapcolumn -- column number where lines are broken and wrapped,
|
| 1695 |
+
defaults to None where lines are not wrapped.
|
| 1696 |
+
linejunk,charjunk -- keyword arguments passed into ndiff() (used by
|
| 1697 |
+
HtmlDiff() to generate the side by side HTML differences). See
|
| 1698 |
+
ndiff() documentation for argument default values and descriptions.
|
| 1699 |
+
"""
|
| 1700 |
+
self._tabsize = tabsize
|
| 1701 |
+
self._wrapcolumn = wrapcolumn
|
| 1702 |
+
self._linejunk = linejunk
|
| 1703 |
+
self._charjunk = charjunk
|
| 1704 |
+
|
| 1705 |
+
def make_file(self, fromlines, tolines, fromdesc='', todesc='',
|
| 1706 |
+
context=False, numlines=5, *, charset='utf-8'):
|
| 1707 |
+
"""Returns HTML file of side by side comparison with change highlights
|
| 1708 |
+
|
| 1709 |
+
Arguments:
|
| 1710 |
+
fromlines -- list of "from" lines
|
| 1711 |
+
tolines -- list of "to" lines
|
| 1712 |
+
fromdesc -- "from" file column header string
|
| 1713 |
+
todesc -- "to" file column header string
|
| 1714 |
+
context -- set to True for contextual differences (defaults to False
|
| 1715 |
+
which shows full differences).
|
| 1716 |
+
numlines -- number of context lines. When context is set True,
|
| 1717 |
+
controls number of lines displayed before and after the change.
|
| 1718 |
+
When context is False, controls the number of lines to place
|
| 1719 |
+
the "next" link anchors before the next change (so click of
|
| 1720 |
+
"next" link jumps to just before the change).
|
| 1721 |
+
charset -- charset of the HTML document
|
| 1722 |
+
"""
|
| 1723 |
+
|
| 1724 |
+
return (self._file_template % dict(
|
| 1725 |
+
styles=self._styles,
|
| 1726 |
+
legend=self._legend,
|
| 1727 |
+
table=self.make_table(fromlines, tolines, fromdesc, todesc,
|
| 1728 |
+
context=context, numlines=numlines),
|
| 1729 |
+
charset=charset
|
| 1730 |
+
)).encode(charset, 'xmlcharrefreplace').decode(charset)
|
| 1731 |
+
|
| 1732 |
+
def _tab_newline_replace(self,fromlines,tolines):
|
| 1733 |
+
"""Returns from/to line lists with tabs expanded and newlines removed.
|
| 1734 |
+
|
| 1735 |
+
Instead of tab characters being replaced by the number of spaces
|
| 1736 |
+
needed to fill in to the next tab stop, this function will fill
|
| 1737 |
+
the space with tab characters. This is done so that the difference
|
| 1738 |
+
algorithms can identify changes in a file when tabs are replaced by
|
| 1739 |
+
spaces and vice versa. At the end of the HTML generation, the tab
|
| 1740 |
+
characters will be replaced with a nonbreakable space.
|
| 1741 |
+
"""
|
| 1742 |
+
def expand_tabs(line):
|
| 1743 |
+
# hide real spaces
|
| 1744 |
+
line = line.replace(' ','\0')
|
| 1745 |
+
# expand tabs into spaces
|
| 1746 |
+
line = line.expandtabs(self._tabsize)
|
| 1747 |
+
# replace spaces from expanded tabs back into tab characters
|
| 1748 |
+
# (we'll replace them with markup after we do differencing)
|
| 1749 |
+
line = line.replace(' ','\t')
|
| 1750 |
+
return line.replace('\0',' ').rstrip('\n')
|
| 1751 |
+
fromlines = [expand_tabs(line) for line in fromlines]
|
| 1752 |
+
tolines = [expand_tabs(line) for line in tolines]
|
| 1753 |
+
return fromlines,tolines
|
| 1754 |
+
|
| 1755 |
+
def _split_line(self,data_list,line_num,text):
|
| 1756 |
+
"""Builds list of text lines by splitting text lines at wrap point
|
| 1757 |
+
|
| 1758 |
+
This function will determine if the input text line needs to be
|
| 1759 |
+
wrapped (split) into separate lines. If so, the first wrap point
|
| 1760 |
+
will be determined and the first line appended to the output
|
| 1761 |
+
text line list. This function is used recursively to handle
|
| 1762 |
+
the second part of the split line to further split it.
|
| 1763 |
+
"""
|
| 1764 |
+
# if blank line or context separator, just add it to the output list
|
| 1765 |
+
if not line_num:
|
| 1766 |
+
data_list.append((line_num,text))
|
| 1767 |
+
return
|
| 1768 |
+
|
| 1769 |
+
# if line text doesn't need wrapping, just add it to the output list
|
| 1770 |
+
size = len(text)
|
| 1771 |
+
max = self._wrapcolumn
|
| 1772 |
+
if (size <= max) or ((size -(text.count('\0')*3)) <= max):
|
| 1773 |
+
data_list.append((line_num,text))
|
| 1774 |
+
return
|
| 1775 |
+
|
| 1776 |
+
# scan text looking for the wrap point, keeping track if the wrap
|
| 1777 |
+
# point is inside markers
|
| 1778 |
+
i = 0
|
| 1779 |
+
n = 0
|
| 1780 |
+
mark = ''
|
| 1781 |
+
while n < max and i < size:
|
| 1782 |
+
if text[i] == '\0':
|
| 1783 |
+
i += 1
|
| 1784 |
+
mark = text[i]
|
| 1785 |
+
i += 1
|
| 1786 |
+
elif text[i] == '\1':
|
| 1787 |
+
i += 1
|
| 1788 |
+
mark = ''
|
| 1789 |
+
else:
|
| 1790 |
+
i += 1
|
| 1791 |
+
n += 1
|
| 1792 |
+
|
| 1793 |
+
# wrap point is inside text, break it up into separate lines
|
| 1794 |
+
line1 = text[:i]
|
| 1795 |
+
line2 = text[i:]
|
| 1796 |
+
|
| 1797 |
+
# if wrap point is inside markers, place end marker at end of first
|
| 1798 |
+
# line and start marker at beginning of second line because each
|
| 1799 |
+
# line will have its own table tag markup around it.
|
| 1800 |
+
if mark:
|
| 1801 |
+
line1 = line1 + '\1'
|
| 1802 |
+
line2 = '\0' + mark + line2
|
| 1803 |
+
|
| 1804 |
+
# tack on first line onto the output list
|
| 1805 |
+
data_list.append((line_num,line1))
|
| 1806 |
+
|
| 1807 |
+
# use this routine again to wrap the remaining text
|
| 1808 |
+
self._split_line(data_list,'>',line2)
|
| 1809 |
+
|
| 1810 |
+
def _line_wrapper(self,diffs):
|
| 1811 |
+
"""Returns iterator that splits (wraps) mdiff text lines"""
|
| 1812 |
+
|
| 1813 |
+
# pull from/to data and flags from mdiff iterator
|
| 1814 |
+
for fromdata,todata,flag in diffs:
|
| 1815 |
+
# check for context separators and pass them through
|
| 1816 |
+
if flag is None:
|
| 1817 |
+
yield fromdata,todata,flag
|
| 1818 |
+
continue
|
| 1819 |
+
(fromline,fromtext),(toline,totext) = fromdata,todata
|
| 1820 |
+
# for each from/to line split it at the wrap column to form
|
| 1821 |
+
# list of text lines.
|
| 1822 |
+
fromlist,tolist = [],[]
|
| 1823 |
+
self._split_line(fromlist,fromline,fromtext)
|
| 1824 |
+
self._split_line(tolist,toline,totext)
|
| 1825 |
+
# yield from/to line in pairs inserting blank lines as
|
| 1826 |
+
# necessary when one side has more wrapped lines
|
| 1827 |
+
while fromlist or tolist:
|
| 1828 |
+
if fromlist:
|
| 1829 |
+
fromdata = fromlist.pop(0)
|
| 1830 |
+
else:
|
| 1831 |
+
fromdata = ('',' ')
|
| 1832 |
+
if tolist:
|
| 1833 |
+
todata = tolist.pop(0)
|
| 1834 |
+
else:
|
| 1835 |
+
todata = ('',' ')
|
| 1836 |
+
yield fromdata,todata,flag
|
| 1837 |
+
|
| 1838 |
+
def _collect_lines(self,diffs):
|
| 1839 |
+
"""Collects mdiff output into separate lists
|
| 1840 |
+
|
| 1841 |
+
Before storing the mdiff from/to data into a list, it is converted
|
| 1842 |
+
into a single line of text with HTML markup.
|
| 1843 |
+
"""
|
| 1844 |
+
|
| 1845 |
+
fromlist,tolist,flaglist = [],[],[]
|
| 1846 |
+
# pull from/to data and flags from mdiff style iterator
|
| 1847 |
+
for fromdata,todata,flag in diffs:
|
| 1848 |
+
try:
|
| 1849 |
+
# store HTML markup of the lines into the lists
|
| 1850 |
+
fromlist.append(self._format_line(0,flag,*fromdata))
|
| 1851 |
+
tolist.append(self._format_line(1,flag,*todata))
|
| 1852 |
+
except TypeError:
|
| 1853 |
+
# exceptions occur for lines where context separators go
|
| 1854 |
+
fromlist.append(None)
|
| 1855 |
+
tolist.append(None)
|
| 1856 |
+
flaglist.append(flag)
|
| 1857 |
+
return fromlist,tolist,flaglist
|
| 1858 |
+
|
| 1859 |
+
def _format_line(self,side,flag,linenum,text):
|
| 1860 |
+
"""Returns HTML markup of "from" / "to" text lines
|
| 1861 |
+
|
| 1862 |
+
side -- 0 or 1 indicating "from" or "to" text
|
| 1863 |
+
flag -- indicates if difference on line
|
| 1864 |
+
linenum -- line number (used for line number column)
|
| 1865 |
+
text -- line text to be marked up
|
| 1866 |
+
"""
|
| 1867 |
+
try:
|
| 1868 |
+
linenum = '%d' % linenum
|
| 1869 |
+
id = ' id="%s%s"' % (self._prefix[side],linenum)
|
| 1870 |
+
except TypeError:
|
| 1871 |
+
# handle blank lines where linenum is '>' or ''
|
| 1872 |
+
id = ''
|
| 1873 |
+
# replace those things that would get confused with HTML symbols
|
| 1874 |
+
text=text.replace("&","&").replace(">",">").replace("<","<")
|
| 1875 |
+
|
| 1876 |
+
# make space non-breakable so they don't get compressed or line wrapped
|
| 1877 |
+
text = text.replace(' ',' ').rstrip()
|
| 1878 |
+
|
| 1879 |
+
return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
|
| 1880 |
+
% (id,linenum,text)
|
| 1881 |
+
|
| 1882 |
+
def _make_prefix(self):
|
| 1883 |
+
"""Create unique anchor prefixes"""
|
| 1884 |
+
|
| 1885 |
+
# Generate a unique anchor prefix so multiple tables
|
| 1886 |
+
# can exist on the same HTML page without conflicts.
|
| 1887 |
+
fromprefix = "from%d_" % HtmlDiff._default_prefix
|
| 1888 |
+
toprefix = "to%d_" % HtmlDiff._default_prefix
|
| 1889 |
+
HtmlDiff._default_prefix += 1
|
| 1890 |
+
# store prefixes so line format method has access
|
| 1891 |
+
self._prefix = [fromprefix,toprefix]
|
| 1892 |
+
|
| 1893 |
+
def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
|
| 1894 |
+
"""Makes list of "next" links"""
|
| 1895 |
+
|
| 1896 |
+
# all anchor names will be generated using the unique "to" prefix
|
| 1897 |
+
toprefix = self._prefix[1]
|
| 1898 |
+
|
| 1899 |
+
# process change flags, generating middle column of next anchors/links
|
| 1900 |
+
next_id = ['']*len(flaglist)
|
| 1901 |
+
next_href = ['']*len(flaglist)
|
| 1902 |
+
num_chg, in_change = 0, False
|
| 1903 |
+
last = 0
|
| 1904 |
+
for i,flag in enumerate(flaglist):
|
| 1905 |
+
if flag:
|
| 1906 |
+
if not in_change:
|
| 1907 |
+
in_change = True
|
| 1908 |
+
last = i
|
| 1909 |
+
# at the beginning of a change, drop an anchor a few lines
|
| 1910 |
+
# (the context lines) before the change for the previous
|
| 1911 |
+
# link
|
| 1912 |
+
i = max([0,i-numlines])
|
| 1913 |
+
next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
|
| 1914 |
+
# at the beginning of a change, drop a link to the next
|
| 1915 |
+
# change
|
| 1916 |
+
num_chg += 1
|
| 1917 |
+
next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
|
| 1918 |
+
toprefix,num_chg)
|
| 1919 |
+
else:
|
| 1920 |
+
in_change = False
|
| 1921 |
+
# check for cases where there is no content to avoid exceptions
|
| 1922 |
+
if not flaglist:
|
| 1923 |
+
flaglist = [False]
|
| 1924 |
+
next_id = ['']
|
| 1925 |
+
next_href = ['']
|
| 1926 |
+
last = 0
|
| 1927 |
+
if context:
|
| 1928 |
+
fromlist = ['<td></td><td> No Differences Found </td>']
|
| 1929 |
+
tolist = fromlist
|
| 1930 |
+
else:
|
| 1931 |
+
fromlist = tolist = ['<td></td><td> Empty File </td>']
|
| 1932 |
+
# if not a change on first line, drop a link
|
| 1933 |
+
if not flaglist[0]:
|
| 1934 |
+
next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
|
| 1935 |
+
# redo the last link to link to the top
|
| 1936 |
+
next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)
|
| 1937 |
+
|
| 1938 |
+
return fromlist,tolist,flaglist,next_href,next_id
|
| 1939 |
+
|
| 1940 |
+
def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
|
| 1941 |
+
numlines=5):
|
| 1942 |
+
"""Returns HTML table of side by side comparison with change highlights
|
| 1943 |
+
|
| 1944 |
+
Arguments:
|
| 1945 |
+
fromlines -- list of "from" lines
|
| 1946 |
+
tolines -- list of "to" lines
|
| 1947 |
+
fromdesc -- "from" file column header string
|
| 1948 |
+
todesc -- "to" file column header string
|
| 1949 |
+
context -- set to True for contextual differences (defaults to False
|
| 1950 |
+
which shows full differences).
|
| 1951 |
+
numlines -- number of context lines. When context is set True,
|
| 1952 |
+
controls number of lines displayed before and after the change.
|
| 1953 |
+
When context is False, controls the number of lines to place
|
| 1954 |
+
the "next" link anchors before the next change (so click of
|
| 1955 |
+
"next" link jumps to just before the change).
|
| 1956 |
+
"""
|
| 1957 |
+
|
| 1958 |
+
# make unique anchor prefixes so that multiple tables may exist
|
| 1959 |
+
# on the same page without conflict.
|
| 1960 |
+
self._make_prefix()
|
| 1961 |
+
|
| 1962 |
+
# change tabs to spaces before it gets more difficult after we insert
|
| 1963 |
+
# markup
|
| 1964 |
+
fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
|
| 1965 |
+
|
| 1966 |
+
# create diffs iterator which generates side by side from/to data
|
| 1967 |
+
if context:
|
| 1968 |
+
context_lines = numlines
|
| 1969 |
+
else:
|
| 1970 |
+
context_lines = None
|
| 1971 |
+
diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
|
| 1972 |
+
charjunk=self._charjunk)
|
| 1973 |
+
|
| 1974 |
+
# set up iterator to wrap lines that exceed desired width
|
| 1975 |
+
if self._wrapcolumn:
|
| 1976 |
+
diffs = self._line_wrapper(diffs)
|
| 1977 |
+
|
| 1978 |
+
# collect up from/to lines and flags into lists (also format the lines)
|
| 1979 |
+
fromlist,tolist,flaglist = self._collect_lines(diffs)
|
| 1980 |
+
|
| 1981 |
+
# process change flags, generating middle column of next anchors/links
|
| 1982 |
+
fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
|
| 1983 |
+
fromlist,tolist,flaglist,context,numlines)
|
| 1984 |
+
|
| 1985 |
+
s = []
|
| 1986 |
+
fmt = ' <tr><td class="diff_next"%s>%s</td>%s' + \
|
| 1987 |
+
'<td class="diff_next">%s</td>%s</tr>\n'
|
| 1988 |
+
for i in range(len(flaglist)):
|
| 1989 |
+
if flaglist[i] is None:
|
| 1990 |
+
# mdiff yields None on separator lines skip the bogus ones
|
| 1991 |
+
# generated for the first line
|
| 1992 |
+
if i > 0:
|
| 1993 |
+
s.append(' </tbody> \n <tbody>\n')
|
| 1994 |
+
else:
|
| 1995 |
+
s.append( fmt % (next_id[i],next_href[i],fromlist[i],
|
| 1996 |
+
next_href[i],tolist[i]))
|
| 1997 |
+
if fromdesc or todesc:
|
| 1998 |
+
header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
|
| 1999 |
+
'<th class="diff_next"><br /></th>',
|
| 2000 |
+
'<th colspan="2" class="diff_header">%s</th>' % fromdesc,
|
| 2001 |
+
'<th class="diff_next"><br /></th>',
|
| 2002 |
+
'<th colspan="2" class="diff_header">%s</th>' % todesc)
|
| 2003 |
+
else:
|
| 2004 |
+
header_row = ''
|
| 2005 |
+
|
| 2006 |
+
table = self._table_template % dict(
|
| 2007 |
+
data_rows=''.join(s),
|
| 2008 |
+
header_row=header_row,
|
| 2009 |
+
prefix=self._prefix[1])
|
| 2010 |
+
|
| 2011 |
+
return table.replace('\0+','<span class="diff_add">'). \
|
| 2012 |
+
replace('\0-','<span class="diff_sub">'). \
|
| 2013 |
+
replace('\0^','<span class="diff_chg">'). \
|
| 2014 |
+
replace('\1','</span>'). \
|
| 2015 |
+
replace('\t',' ')
|
| 2016 |
+
|
| 2017 |
+
del re
|
| 2018 |
+
|
| 2019 |
+
def restore(delta, which):
|
| 2020 |
+
r"""
|
| 2021 |
+
Generate one of the two sequences that generated a delta.
|
| 2022 |
+
|
| 2023 |
+
Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
|
| 2024 |
+
lines originating from file 1 or 2 (parameter `which`), stripping off line
|
| 2025 |
+
prefixes.
|
| 2026 |
+
|
| 2027 |
+
Examples:
|
| 2028 |
+
|
| 2029 |
+
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
|
| 2030 |
+
... 'ore\ntree\nemu\n'.splitlines(keepends=True))
|
| 2031 |
+
>>> diff = list(diff)
|
| 2032 |
+
>>> print(''.join(restore(diff, 1)), end="")
|
| 2033 |
+
one
|
| 2034 |
+
two
|
| 2035 |
+
three
|
| 2036 |
+
>>> print(''.join(restore(diff, 2)), end="")
|
| 2037 |
+
ore
|
| 2038 |
+
tree
|
| 2039 |
+
emu
|
| 2040 |
+
"""
|
| 2041 |
+
try:
|
| 2042 |
+
tag = {1: "- ", 2: "+ "}[int(which)]
|
| 2043 |
+
except KeyError:
|
| 2044 |
+
raise ValueError('unknown delta choice (must be 1 or 2): %r'
|
| 2045 |
+
% which) from None
|
| 2046 |
+
prefixes = (" ", tag)
|
| 2047 |
+
for line in delta:
|
| 2048 |
+
if line[:2] in prefixes:
|
| 2049 |
+
yield line[2:]
|
| 2050 |
+
|
| 2051 |
+
def _test():
|
| 2052 |
+
import doctest, difflib
|
| 2053 |
+
return doctest.testmod(difflib)
|
| 2054 |
+
|
| 2055 |
+
if __name__ == "__main__":
|
| 2056 |
+
_test()
|
evalkit_llava/lib/python3.10/distutils/README
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
This directory contains the Distutils package.
|
| 2 |
+
|
| 3 |
+
There's a full documentation available at:
|
| 4 |
+
|
| 5 |
+
https://docs.python.org/distutils/
|
| 6 |
+
|
| 7 |
+
The Distutils-SIG web page is also a good starting point:
|
| 8 |
+
|
| 9 |
+
https://www.python.org/sigs/distutils-sig/
|
| 10 |
+
|
| 11 |
+
$Id$
|
evalkit_llava/lib/python3.10/distutils/__init__.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""distutils
|
| 2 |
+
|
| 3 |
+
The main package for the Python Module Distribution Utilities. Normally
|
| 4 |
+
used from a setup script as
|
| 5 |
+
|
| 6 |
+
from distutils.core import setup
|
| 7 |
+
|
| 8 |
+
setup (...)
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import sys
|
| 12 |
+
import warnings
|
| 13 |
+
|
| 14 |
+
__version__ = sys.version[:sys.version.index(' ')]
|
| 15 |
+
|
| 16 |
+
_DEPRECATION_MESSAGE = ("The distutils package is deprecated and slated for "
|
| 17 |
+
"removal in Python 3.12. Use setuptools or check "
|
| 18 |
+
"PEP 632 for potential alternatives")
|
| 19 |
+
warnings.warn(_DEPRECATION_MESSAGE,
|
| 20 |
+
DeprecationWarning, 2)
|
evalkit_llava/lib/python3.10/distutils/_msvccompiler.py
ADDED
|
@@ -0,0 +1,546 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""distutils._msvccompiler
|
| 2 |
+
|
| 3 |
+
Contains MSVCCompiler, an implementation of the abstract CCompiler class
|
| 4 |
+
for Microsoft Visual Studio 2015.
|
| 5 |
+
|
| 6 |
+
The module is compatible with VS 2015 and later. You can find legacy support
|
| 7 |
+
for older versions in distutils.msvc9compiler and distutils.msvccompiler.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
# Written by Perry Stoll
|
| 11 |
+
# hacked by Robin Becker and Thomas Heller to do a better job of
|
| 12 |
+
# finding DevStudio (through the registry)
|
| 13 |
+
# ported to VS 2005 and VS 2008 by Christian Heimes
|
| 14 |
+
# ported to VS 2015 by Steve Dower
|
| 15 |
+
|
| 16 |
+
import os
|
| 17 |
+
import subprocess
|
| 18 |
+
import winreg
|
| 19 |
+
|
| 20 |
+
from distutils.errors import DistutilsExecError, DistutilsPlatformError, \
|
| 21 |
+
CompileError, LibError, LinkError
|
| 22 |
+
from distutils.ccompiler import CCompiler, gen_lib_options
|
| 23 |
+
from distutils import log
|
| 24 |
+
from distutils.util import get_platform
|
| 25 |
+
|
| 26 |
+
from itertools import count
|
| 27 |
+
|
| 28 |
+
def _find_vc2015():
|
| 29 |
+
try:
|
| 30 |
+
key = winreg.OpenKeyEx(
|
| 31 |
+
winreg.HKEY_LOCAL_MACHINE,
|
| 32 |
+
r"Software\Microsoft\VisualStudio\SxS\VC7",
|
| 33 |
+
access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY
|
| 34 |
+
)
|
| 35 |
+
except OSError:
|
| 36 |
+
log.debug("Visual C++ is not registered")
|
| 37 |
+
return None, None
|
| 38 |
+
|
| 39 |
+
best_version = 0
|
| 40 |
+
best_dir = None
|
| 41 |
+
with key:
|
| 42 |
+
for i in count():
|
| 43 |
+
try:
|
| 44 |
+
v, vc_dir, vt = winreg.EnumValue(key, i)
|
| 45 |
+
except OSError:
|
| 46 |
+
break
|
| 47 |
+
if v and vt == winreg.REG_SZ and os.path.isdir(vc_dir):
|
| 48 |
+
try:
|
| 49 |
+
version = int(float(v))
|
| 50 |
+
except (ValueError, TypeError):
|
| 51 |
+
continue
|
| 52 |
+
if version >= 14 and version > best_version:
|
| 53 |
+
best_version, best_dir = version, vc_dir
|
| 54 |
+
return best_version, best_dir
|
| 55 |
+
|
| 56 |
+
def _find_vc2017():
|
| 57 |
+
"""Returns "15, path" based on the result of invoking vswhere.exe
|
| 58 |
+
If no install is found, returns "None, None"
|
| 59 |
+
|
| 60 |
+
The version is returned to avoid unnecessarily changing the function
|
| 61 |
+
result. It may be ignored when the path is not None.
|
| 62 |
+
|
| 63 |
+
If vswhere.exe is not available, by definition, VS 2017 is not
|
| 64 |
+
installed.
|
| 65 |
+
"""
|
| 66 |
+
root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles")
|
| 67 |
+
if not root:
|
| 68 |
+
return None, None
|
| 69 |
+
|
| 70 |
+
try:
|
| 71 |
+
path = subprocess.check_output([
|
| 72 |
+
os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"),
|
| 73 |
+
"-latest",
|
| 74 |
+
"-prerelease",
|
| 75 |
+
"-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
|
| 76 |
+
"-property", "installationPath",
|
| 77 |
+
"-products", "*",
|
| 78 |
+
], encoding="mbcs", errors="strict").strip()
|
| 79 |
+
except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):
|
| 80 |
+
return None, None
|
| 81 |
+
|
| 82 |
+
path = os.path.join(path, "VC", "Auxiliary", "Build")
|
| 83 |
+
if os.path.isdir(path):
|
| 84 |
+
return 15, path
|
| 85 |
+
|
| 86 |
+
return None, None
|
| 87 |
+
|
| 88 |
+
PLAT_SPEC_TO_RUNTIME = {
|
| 89 |
+
'x86' : 'x86',
|
| 90 |
+
'x86_amd64' : 'x64',
|
| 91 |
+
'x86_arm' : 'arm',
|
| 92 |
+
'x86_arm64' : 'arm64'
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
def _find_vcvarsall(plat_spec):
|
| 96 |
+
# bpo-38597: Removed vcruntime return value
|
| 97 |
+
_, best_dir = _find_vc2017()
|
| 98 |
+
|
| 99 |
+
if not best_dir:
|
| 100 |
+
best_version, best_dir = _find_vc2015()
|
| 101 |
+
|
| 102 |
+
if not best_dir:
|
| 103 |
+
log.debug("No suitable Visual C++ version found")
|
| 104 |
+
return None, None
|
| 105 |
+
|
| 106 |
+
vcvarsall = os.path.join(best_dir, "vcvarsall.bat")
|
| 107 |
+
if not os.path.isfile(vcvarsall):
|
| 108 |
+
log.debug("%s cannot be found", vcvarsall)
|
| 109 |
+
return None, None
|
| 110 |
+
|
| 111 |
+
return vcvarsall, None
|
| 112 |
+
|
| 113 |
+
def _get_vc_env(plat_spec):
|
| 114 |
+
if os.getenv("DISTUTILS_USE_SDK"):
|
| 115 |
+
return {
|
| 116 |
+
key.lower(): value
|
| 117 |
+
for key, value in os.environ.items()
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
vcvarsall, _ = _find_vcvarsall(plat_spec)
|
| 121 |
+
if not vcvarsall:
|
| 122 |
+
raise DistutilsPlatformError("Unable to find vcvarsall.bat")
|
| 123 |
+
|
| 124 |
+
try:
|
| 125 |
+
out = subprocess.check_output(
|
| 126 |
+
'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec),
|
| 127 |
+
stderr=subprocess.STDOUT,
|
| 128 |
+
).decode('utf-16le', errors='replace')
|
| 129 |
+
except subprocess.CalledProcessError as exc:
|
| 130 |
+
log.error(exc.output)
|
| 131 |
+
raise DistutilsPlatformError("Error executing {}"
|
| 132 |
+
.format(exc.cmd))
|
| 133 |
+
|
| 134 |
+
env = {
|
| 135 |
+
key.lower(): value
|
| 136 |
+
for key, _, value in
|
| 137 |
+
(line.partition('=') for line in out.splitlines())
|
| 138 |
+
if key and value
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
return env
|
| 142 |
+
|
| 143 |
+
def _find_exe(exe, paths=None):
|
| 144 |
+
"""Return path to an MSVC executable program.
|
| 145 |
+
|
| 146 |
+
Tries to find the program in several places: first, one of the
|
| 147 |
+
MSVC program search paths from the registry; next, the directories
|
| 148 |
+
in the PATH environment variable. If any of those work, return an
|
| 149 |
+
absolute path that is known to exist. If none of them work, just
|
| 150 |
+
return the original program name, 'exe'.
|
| 151 |
+
"""
|
| 152 |
+
if not paths:
|
| 153 |
+
paths = os.getenv('path').split(os.pathsep)
|
| 154 |
+
for p in paths:
|
| 155 |
+
fn = os.path.join(os.path.abspath(p), exe)
|
| 156 |
+
if os.path.isfile(fn):
|
| 157 |
+
return fn
|
| 158 |
+
return exe
|
| 159 |
+
|
| 160 |
+
# A map keyed by get_platform() return values to values accepted by
|
| 161 |
+
# 'vcvarsall.bat'. Always cross-compile from x86 to work with the
|
| 162 |
+
# lighter-weight MSVC installs that do not include native 64-bit tools.
|
| 163 |
+
PLAT_TO_VCVARS = {
|
| 164 |
+
'win32' : 'x86',
|
| 165 |
+
'win-amd64' : 'x86_amd64',
|
| 166 |
+
'win-arm32' : 'x86_arm',
|
| 167 |
+
'win-arm64' : 'x86_arm64'
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
class MSVCCompiler(CCompiler) :
|
| 171 |
+
"""Concrete class that implements an interface to Microsoft Visual C++,
|
| 172 |
+
as defined by the CCompiler abstract class."""
|
| 173 |
+
|
| 174 |
+
compiler_type = 'msvc'
|
| 175 |
+
|
| 176 |
+
# Just set this so CCompiler's constructor doesn't barf. We currently
|
| 177 |
+
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
|
| 178 |
+
# as it really isn't necessary for this sort of single-compiler class.
|
| 179 |
+
# Would be nice to have a consistent interface with UnixCCompiler,
|
| 180 |
+
# though, so it's worth thinking about.
|
| 181 |
+
executables = {}
|
| 182 |
+
|
| 183 |
+
# Private class data (need to distinguish C from C++ source for compiler)
|
| 184 |
+
_c_extensions = ['.c']
|
| 185 |
+
_cpp_extensions = ['.cc', '.cpp', '.cxx']
|
| 186 |
+
_rc_extensions = ['.rc']
|
| 187 |
+
_mc_extensions = ['.mc']
|
| 188 |
+
|
| 189 |
+
# Needed for the filename generation methods provided by the
|
| 190 |
+
# base class, CCompiler.
|
| 191 |
+
src_extensions = (_c_extensions + _cpp_extensions +
|
| 192 |
+
_rc_extensions + _mc_extensions)
|
| 193 |
+
res_extension = '.res'
|
| 194 |
+
obj_extension = '.obj'
|
| 195 |
+
static_lib_extension = '.lib'
|
| 196 |
+
shared_lib_extension = '.dll'
|
| 197 |
+
static_lib_format = shared_lib_format = '%s%s'
|
| 198 |
+
exe_extension = '.exe'
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def __init__(self, verbose=0, dry_run=0, force=0):
|
| 202 |
+
CCompiler.__init__ (self, verbose, dry_run, force)
|
| 203 |
+
# target platform (.plat_name is consistent with 'bdist')
|
| 204 |
+
self.plat_name = None
|
| 205 |
+
self.initialized = False
|
| 206 |
+
|
| 207 |
+
def initialize(self, plat_name=None):
|
| 208 |
+
# multi-init means we would need to check platform same each time...
|
| 209 |
+
assert not self.initialized, "don't init multiple times"
|
| 210 |
+
if plat_name is None:
|
| 211 |
+
plat_name = get_platform()
|
| 212 |
+
# sanity check for platforms to prevent obscure errors later.
|
| 213 |
+
if plat_name not in PLAT_TO_VCVARS:
|
| 214 |
+
raise DistutilsPlatformError("--plat-name must be one of {}"
|
| 215 |
+
.format(tuple(PLAT_TO_VCVARS)))
|
| 216 |
+
|
| 217 |
+
# Get the vcvarsall.bat spec for the requested platform.
|
| 218 |
+
plat_spec = PLAT_TO_VCVARS[plat_name]
|
| 219 |
+
|
| 220 |
+
vc_env = _get_vc_env(plat_spec)
|
| 221 |
+
if not vc_env:
|
| 222 |
+
raise DistutilsPlatformError("Unable to find a compatible "
|
| 223 |
+
"Visual Studio installation.")
|
| 224 |
+
|
| 225 |
+
self._paths = vc_env.get('path', '')
|
| 226 |
+
paths = self._paths.split(os.pathsep)
|
| 227 |
+
self.cc = _find_exe("cl.exe", paths)
|
| 228 |
+
self.linker = _find_exe("link.exe", paths)
|
| 229 |
+
self.lib = _find_exe("lib.exe", paths)
|
| 230 |
+
self.rc = _find_exe("rc.exe", paths) # resource compiler
|
| 231 |
+
self.mc = _find_exe("mc.exe", paths) # message compiler
|
| 232 |
+
self.mt = _find_exe("mt.exe", paths) # message compiler
|
| 233 |
+
|
| 234 |
+
for dir in vc_env.get('include', '').split(os.pathsep):
|
| 235 |
+
if dir:
|
| 236 |
+
self.add_include_dir(dir.rstrip(os.sep))
|
| 237 |
+
|
| 238 |
+
for dir in vc_env.get('lib', '').split(os.pathsep):
|
| 239 |
+
if dir:
|
| 240 |
+
self.add_library_dir(dir.rstrip(os.sep))
|
| 241 |
+
|
| 242 |
+
self.preprocess_options = None
|
| 243 |
+
# bpo-38597: Always compile with dynamic linking
|
| 244 |
+
# Future releases of Python 3.x will include all past
|
| 245 |
+
# versions of vcruntime*.dll for compatibility.
|
| 246 |
+
self.compile_options = [
|
| 247 |
+
'/nologo', '/Ox', '/W3', '/GL', '/DNDEBUG', '/MD'
|
| 248 |
+
]
|
| 249 |
+
|
| 250 |
+
self.compile_options_debug = [
|
| 251 |
+
'/nologo', '/Od', '/MDd', '/Zi', '/W3', '/D_DEBUG'
|
| 252 |
+
]
|
| 253 |
+
|
| 254 |
+
ldflags = [
|
| 255 |
+
'/nologo', '/INCREMENTAL:NO', '/LTCG'
|
| 256 |
+
]
|
| 257 |
+
|
| 258 |
+
ldflags_debug = [
|
| 259 |
+
'/nologo', '/INCREMENTAL:NO', '/LTCG', '/DEBUG:FULL'
|
| 260 |
+
]
|
| 261 |
+
|
| 262 |
+
self.ldflags_exe = [*ldflags, '/MANIFEST:EMBED,ID=1']
|
| 263 |
+
self.ldflags_exe_debug = [*ldflags_debug, '/MANIFEST:EMBED,ID=1']
|
| 264 |
+
self.ldflags_shared = [*ldflags, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO']
|
| 265 |
+
self.ldflags_shared_debug = [*ldflags_debug, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO']
|
| 266 |
+
self.ldflags_static = [*ldflags]
|
| 267 |
+
self.ldflags_static_debug = [*ldflags_debug]
|
| 268 |
+
|
| 269 |
+
self._ldflags = {
|
| 270 |
+
(CCompiler.EXECUTABLE, None): self.ldflags_exe,
|
| 271 |
+
(CCompiler.EXECUTABLE, False): self.ldflags_exe,
|
| 272 |
+
(CCompiler.EXECUTABLE, True): self.ldflags_exe_debug,
|
| 273 |
+
(CCompiler.SHARED_OBJECT, None): self.ldflags_shared,
|
| 274 |
+
(CCompiler.SHARED_OBJECT, False): self.ldflags_shared,
|
| 275 |
+
(CCompiler.SHARED_OBJECT, True): self.ldflags_shared_debug,
|
| 276 |
+
(CCompiler.SHARED_LIBRARY, None): self.ldflags_static,
|
| 277 |
+
(CCompiler.SHARED_LIBRARY, False): self.ldflags_static,
|
| 278 |
+
(CCompiler.SHARED_LIBRARY, True): self.ldflags_static_debug,
|
| 279 |
+
}
|
| 280 |
+
|
| 281 |
+
self.initialized = True
|
| 282 |
+
|
| 283 |
+
# -- Worker methods ------------------------------------------------
|
| 284 |
+
|
| 285 |
+
def object_filenames(self,
|
| 286 |
+
source_filenames,
|
| 287 |
+
strip_dir=0,
|
| 288 |
+
output_dir=''):
|
| 289 |
+
ext_map = {
|
| 290 |
+
**{ext: self.obj_extension for ext in self.src_extensions},
|
| 291 |
+
**{ext: self.res_extension for ext in self._rc_extensions + self._mc_extensions},
|
| 292 |
+
}
|
| 293 |
+
|
| 294 |
+
output_dir = output_dir or ''
|
| 295 |
+
|
| 296 |
+
def make_out_path(p):
|
| 297 |
+
base, ext = os.path.splitext(p)
|
| 298 |
+
if strip_dir:
|
| 299 |
+
base = os.path.basename(base)
|
| 300 |
+
else:
|
| 301 |
+
_, base = os.path.splitdrive(base)
|
| 302 |
+
if base.startswith((os.path.sep, os.path.altsep)):
|
| 303 |
+
base = base[1:]
|
| 304 |
+
try:
|
| 305 |
+
# XXX: This may produce absurdly long paths. We should check
|
| 306 |
+
# the length of the result and trim base until we fit within
|
| 307 |
+
# 260 characters.
|
| 308 |
+
return os.path.join(output_dir, base + ext_map[ext])
|
| 309 |
+
except LookupError:
|
| 310 |
+
# Better to raise an exception instead of silently continuing
|
| 311 |
+
# and later complain about sources and targets having
|
| 312 |
+
# different lengths
|
| 313 |
+
raise CompileError("Don't know how to compile {}".format(p))
|
| 314 |
+
|
| 315 |
+
return list(map(make_out_path, source_filenames))
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def compile(self, sources,
|
| 319 |
+
output_dir=None, macros=None, include_dirs=None, debug=0,
|
| 320 |
+
extra_preargs=None, extra_postargs=None, depends=None):
|
| 321 |
+
|
| 322 |
+
if not self.initialized:
|
| 323 |
+
self.initialize()
|
| 324 |
+
compile_info = self._setup_compile(output_dir, macros, include_dirs,
|
| 325 |
+
sources, depends, extra_postargs)
|
| 326 |
+
macros, objects, extra_postargs, pp_opts, build = compile_info
|
| 327 |
+
|
| 328 |
+
compile_opts = extra_preargs or []
|
| 329 |
+
compile_opts.append('/c')
|
| 330 |
+
if debug:
|
| 331 |
+
compile_opts.extend(self.compile_options_debug)
|
| 332 |
+
else:
|
| 333 |
+
compile_opts.extend(self.compile_options)
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
add_cpp_opts = False
|
| 337 |
+
|
| 338 |
+
for obj in objects:
|
| 339 |
+
try:
|
| 340 |
+
src, ext = build[obj]
|
| 341 |
+
except KeyError:
|
| 342 |
+
continue
|
| 343 |
+
if debug:
|
| 344 |
+
# pass the full pathname to MSVC in debug mode,
|
| 345 |
+
# this allows the debugger to find the source file
|
| 346 |
+
# without asking the user to browse for it
|
| 347 |
+
src = os.path.abspath(src)
|
| 348 |
+
|
| 349 |
+
# Anaconda/conda-forge customisation, we want our pdbs to be
|
| 350 |
+
# relocatable:
|
| 351 |
+
# https://developercommunity.visualstudio.com/comments/623156/view.html
|
| 352 |
+
d1trimfile_opts = []
|
| 353 |
+
if 'SRC_DIR' in os.environ and os.path.basename(self.cc) == "cl.exe":
|
| 354 |
+
d1trimfile_opts.append("/d1trimfile:" + os.environ['SRC_DIR'])
|
| 355 |
+
|
| 356 |
+
if ext in self._c_extensions:
|
| 357 |
+
input_opt = "/Tc" + src
|
| 358 |
+
elif ext in self._cpp_extensions:
|
| 359 |
+
input_opt = "/Tp" + src
|
| 360 |
+
add_cpp_opts = True
|
| 361 |
+
elif ext in self._rc_extensions:
|
| 362 |
+
# compile .RC to .RES file
|
| 363 |
+
input_opt = src
|
| 364 |
+
output_opt = "/fo" + obj
|
| 365 |
+
try:
|
| 366 |
+
self.spawn([self.rc] + pp_opts + [output_opt, input_opt])
|
| 367 |
+
except DistutilsExecError as msg:
|
| 368 |
+
raise CompileError(msg)
|
| 369 |
+
continue
|
| 370 |
+
elif ext in self._mc_extensions:
|
| 371 |
+
# Compile .MC to .RC file to .RES file.
|
| 372 |
+
# * '-h dir' specifies the directory for the
|
| 373 |
+
# generated include file
|
| 374 |
+
# * '-r dir' specifies the target directory of the
|
| 375 |
+
# generated RC file and the binary message resource
|
| 376 |
+
# it includes
|
| 377 |
+
#
|
| 378 |
+
# For now (since there are no options to change this),
|
| 379 |
+
# we use the source-directory for the include file and
|
| 380 |
+
# the build directory for the RC file and message
|
| 381 |
+
# resources. This works at least for win32all.
|
| 382 |
+
h_dir = os.path.dirname(src)
|
| 383 |
+
rc_dir = os.path.dirname(obj)
|
| 384 |
+
try:
|
| 385 |
+
# first compile .MC to .RC and .H file
|
| 386 |
+
self.spawn([self.mc, '-h', h_dir, '-r', rc_dir, src])
|
| 387 |
+
base, _ = os.path.splitext(os.path.basename (src))
|
| 388 |
+
rc_file = os.path.join(rc_dir, base + '.rc')
|
| 389 |
+
# then compile .RC to .RES file
|
| 390 |
+
self.spawn([self.rc, "/fo" + obj, rc_file])
|
| 391 |
+
|
| 392 |
+
except DistutilsExecError as msg:
|
| 393 |
+
raise CompileError(msg)
|
| 394 |
+
continue
|
| 395 |
+
else:
|
| 396 |
+
# how to handle this file?
|
| 397 |
+
raise CompileError("Don't know how to compile {} to {}"
|
| 398 |
+
.format(src, obj))
|
| 399 |
+
|
| 400 |
+
args = [self.cc] + compile_opts + pp_opts + d1trimfile_opts
|
| 401 |
+
if add_cpp_opts:
|
| 402 |
+
args.append('/EHsc')
|
| 403 |
+
args.append(input_opt)
|
| 404 |
+
args.append("/Fo" + obj)
|
| 405 |
+
args.extend(extra_postargs)
|
| 406 |
+
|
| 407 |
+
try:
|
| 408 |
+
self.spawn(args)
|
| 409 |
+
except DistutilsExecError as msg:
|
| 410 |
+
raise CompileError(msg)
|
| 411 |
+
|
| 412 |
+
return objects
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
def create_static_lib(self,
|
| 416 |
+
objects,
|
| 417 |
+
output_libname,
|
| 418 |
+
output_dir=None,
|
| 419 |
+
debug=0,
|
| 420 |
+
target_lang=None):
|
| 421 |
+
|
| 422 |
+
if not self.initialized:
|
| 423 |
+
self.initialize()
|
| 424 |
+
objects, output_dir = self._fix_object_args(objects, output_dir)
|
| 425 |
+
output_filename = self.library_filename(output_libname,
|
| 426 |
+
output_dir=output_dir)
|
| 427 |
+
|
| 428 |
+
if self._need_link(objects, output_filename):
|
| 429 |
+
lib_args = objects + ['/OUT:' + output_filename]
|
| 430 |
+
if debug:
|
| 431 |
+
pass # XXX what goes here?
|
| 432 |
+
try:
|
| 433 |
+
log.debug('Executing "%s" %s', self.lib, ' '.join(lib_args))
|
| 434 |
+
self.spawn([self.lib] + lib_args)
|
| 435 |
+
except DistutilsExecError as msg:
|
| 436 |
+
raise LibError(msg)
|
| 437 |
+
else:
|
| 438 |
+
log.debug("skipping %s (up-to-date)", output_filename)
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
def link(self,
|
| 442 |
+
target_desc,
|
| 443 |
+
objects,
|
| 444 |
+
output_filename,
|
| 445 |
+
output_dir=None,
|
| 446 |
+
libraries=None,
|
| 447 |
+
library_dirs=None,
|
| 448 |
+
runtime_library_dirs=None,
|
| 449 |
+
export_symbols=None,
|
| 450 |
+
debug=0,
|
| 451 |
+
extra_preargs=None,
|
| 452 |
+
extra_postargs=None,
|
| 453 |
+
build_temp=None,
|
| 454 |
+
target_lang=None):
|
| 455 |
+
|
| 456 |
+
if not self.initialized:
|
| 457 |
+
self.initialize()
|
| 458 |
+
objects, output_dir = self._fix_object_args(objects, output_dir)
|
| 459 |
+
fixed_args = self._fix_lib_args(libraries, library_dirs,
|
| 460 |
+
runtime_library_dirs)
|
| 461 |
+
libraries, library_dirs, runtime_library_dirs = fixed_args
|
| 462 |
+
|
| 463 |
+
if runtime_library_dirs:
|
| 464 |
+
self.warn("I don't know what to do with 'runtime_library_dirs': "
|
| 465 |
+
+ str(runtime_library_dirs))
|
| 466 |
+
|
| 467 |
+
lib_opts = gen_lib_options(self,
|
| 468 |
+
library_dirs, runtime_library_dirs,
|
| 469 |
+
libraries)
|
| 470 |
+
if output_dir is not None:
|
| 471 |
+
output_filename = os.path.join(output_dir, output_filename)
|
| 472 |
+
|
| 473 |
+
if self._need_link(objects, output_filename):
|
| 474 |
+
ldflags = self._ldflags[target_desc, debug]
|
| 475 |
+
|
| 476 |
+
export_opts = ["/EXPORT:" + sym for sym in (export_symbols or [])]
|
| 477 |
+
|
| 478 |
+
ld_args = (ldflags + lib_opts + export_opts +
|
| 479 |
+
objects + ['/OUT:' + output_filename])
|
| 480 |
+
|
| 481 |
+
# The MSVC linker generates .lib and .exp files, which cannot be
|
| 482 |
+
# suppressed by any linker switches. The .lib files may even be
|
| 483 |
+
# needed! Make sure they are generated in the temporary build
|
| 484 |
+
# directory. Since they have different names for debug and release
|
| 485 |
+
# builds, they can go into the same directory.
|
| 486 |
+
build_temp = os.path.dirname(objects[0])
|
| 487 |
+
if export_symbols is not None:
|
| 488 |
+
(dll_name, dll_ext) = os.path.splitext(
|
| 489 |
+
os.path.basename(output_filename))
|
| 490 |
+
implib_file = os.path.join(
|
| 491 |
+
build_temp,
|
| 492 |
+
self.library_filename(dll_name))
|
| 493 |
+
ld_args.append ('/IMPLIB:' + implib_file)
|
| 494 |
+
|
| 495 |
+
if extra_preargs:
|
| 496 |
+
ld_args[:0] = extra_preargs
|
| 497 |
+
if extra_postargs:
|
| 498 |
+
ld_args.extend(extra_postargs)
|
| 499 |
+
|
| 500 |
+
output_dir = os.path.dirname(os.path.abspath(output_filename))
|
| 501 |
+
self.mkpath(output_dir)
|
| 502 |
+
try:
|
| 503 |
+
log.debug('Executing "%s" %s', self.linker, ' '.join(ld_args))
|
| 504 |
+
self.spawn([self.linker] + ld_args)
|
| 505 |
+
except DistutilsExecError as msg:
|
| 506 |
+
raise LinkError(msg)
|
| 507 |
+
else:
|
| 508 |
+
log.debug("skipping %s (up-to-date)", output_filename)
|
| 509 |
+
|
| 510 |
+
def spawn(self, cmd):
|
| 511 |
+
old_path = os.getenv('path')
|
| 512 |
+
try:
|
| 513 |
+
os.environ['path'] = self._paths
|
| 514 |
+
return super().spawn(cmd)
|
| 515 |
+
finally:
|
| 516 |
+
os.environ['path'] = old_path
|
| 517 |
+
|
| 518 |
+
# -- Miscellaneous methods -----------------------------------------
|
| 519 |
+
# These are all used by the 'gen_lib_options() function, in
|
| 520 |
+
# ccompiler.py.
|
| 521 |
+
|
| 522 |
+
def library_dir_option(self, dir):
|
| 523 |
+
return "/LIBPATH:" + dir
|
| 524 |
+
|
| 525 |
+
def runtime_library_dir_option(self, dir):
|
| 526 |
+
raise DistutilsPlatformError(
|
| 527 |
+
"don't know how to set runtime library search path for MSVC")
|
| 528 |
+
|
| 529 |
+
def library_option(self, lib):
|
| 530 |
+
return self.library_filename(lib)
|
| 531 |
+
|
| 532 |
+
def find_library_file(self, dirs, lib, debug=0):
|
| 533 |
+
# Prefer a debugging library if found (and requested), but deal
|
| 534 |
+
# with it if we don't have one.
|
| 535 |
+
if debug:
|
| 536 |
+
try_names = [lib + "_d", lib]
|
| 537 |
+
else:
|
| 538 |
+
try_names = [lib]
|
| 539 |
+
for dir in dirs:
|
| 540 |
+
for name in try_names:
|
| 541 |
+
libfile = os.path.join(dir, self.library_filename(name))
|
| 542 |
+
if os.path.isfile(libfile):
|
| 543 |
+
return libfile
|
| 544 |
+
else:
|
| 545 |
+
# Oops, didn't find it in *any* of 'dirs'
|
| 546 |
+
return None
|
evalkit_llava/lib/python3.10/distutils/archive_util.py
ADDED
|
@@ -0,0 +1,256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""distutils.archive_util
|
| 2 |
+
|
| 3 |
+
Utility functions for creating archive files (tarballs, zip files,
|
| 4 |
+
that sort of thing)."""
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
from warnings import warn
|
| 8 |
+
import sys
|
| 9 |
+
|
| 10 |
+
try:
|
| 11 |
+
import zipfile
|
| 12 |
+
except ImportError:
|
| 13 |
+
zipfile = None
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
from distutils.errors import DistutilsExecError
|
| 17 |
+
from distutils.spawn import spawn
|
| 18 |
+
from distutils.dir_util import mkpath
|
| 19 |
+
from distutils import log
|
| 20 |
+
|
| 21 |
+
try:
|
| 22 |
+
from pwd import getpwnam
|
| 23 |
+
except ImportError:
|
| 24 |
+
getpwnam = None
|
| 25 |
+
|
| 26 |
+
try:
|
| 27 |
+
from grp import getgrnam
|
| 28 |
+
except ImportError:
|
| 29 |
+
getgrnam = None
|
| 30 |
+
|
| 31 |
+
def _get_gid(name):
|
| 32 |
+
"""Returns a gid, given a group name."""
|
| 33 |
+
if getgrnam is None or name is None:
|
| 34 |
+
return None
|
| 35 |
+
try:
|
| 36 |
+
result = getgrnam(name)
|
| 37 |
+
except KeyError:
|
| 38 |
+
result = None
|
| 39 |
+
if result is not None:
|
| 40 |
+
return result[2]
|
| 41 |
+
return None
|
| 42 |
+
|
| 43 |
+
def _get_uid(name):
|
| 44 |
+
"""Returns an uid, given a user name."""
|
| 45 |
+
if getpwnam is None or name is None:
|
| 46 |
+
return None
|
| 47 |
+
try:
|
| 48 |
+
result = getpwnam(name)
|
| 49 |
+
except KeyError:
|
| 50 |
+
result = None
|
| 51 |
+
if result is not None:
|
| 52 |
+
return result[2]
|
| 53 |
+
return None
|
| 54 |
+
|
| 55 |
+
def make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
|
| 56 |
+
owner=None, group=None):
|
| 57 |
+
"""Create a (possibly compressed) tar file from all the files under
|
| 58 |
+
'base_dir'.
|
| 59 |
+
|
| 60 |
+
'compress' must be "gzip" (the default), "bzip2", "xz", "compress", or
|
| 61 |
+
None. ("compress" will be deprecated in Python 3.2)
|
| 62 |
+
|
| 63 |
+
'owner' and 'group' can be used to define an owner and a group for the
|
| 64 |
+
archive that is being built. If not provided, the current owner and group
|
| 65 |
+
will be used.
|
| 66 |
+
|
| 67 |
+
The output tar file will be named 'base_dir' + ".tar", possibly plus
|
| 68 |
+
the appropriate compression extension (".gz", ".bz2", ".xz" or ".Z").
|
| 69 |
+
|
| 70 |
+
Returns the output filename.
|
| 71 |
+
"""
|
| 72 |
+
tar_compression = {'gzip': 'gz', 'bzip2': 'bz2', 'xz': 'xz', None: '',
|
| 73 |
+
'compress': ''}
|
| 74 |
+
compress_ext = {'gzip': '.gz', 'bzip2': '.bz2', 'xz': '.xz',
|
| 75 |
+
'compress': '.Z'}
|
| 76 |
+
|
| 77 |
+
# flags for compression program, each element of list will be an argument
|
| 78 |
+
if compress is not None and compress not in compress_ext.keys():
|
| 79 |
+
raise ValueError(
|
| 80 |
+
"bad value for 'compress': must be None, 'gzip', 'bzip2', "
|
| 81 |
+
"'xz' or 'compress'")
|
| 82 |
+
|
| 83 |
+
archive_name = base_name + '.tar'
|
| 84 |
+
if compress != 'compress':
|
| 85 |
+
archive_name += compress_ext.get(compress, '')
|
| 86 |
+
|
| 87 |
+
mkpath(os.path.dirname(archive_name), dry_run=dry_run)
|
| 88 |
+
|
| 89 |
+
# creating the tarball
|
| 90 |
+
import tarfile # late import so Python build itself doesn't break
|
| 91 |
+
|
| 92 |
+
log.info('Creating tar archive')
|
| 93 |
+
|
| 94 |
+
uid = _get_uid(owner)
|
| 95 |
+
gid = _get_gid(group)
|
| 96 |
+
|
| 97 |
+
def _set_uid_gid(tarinfo):
|
| 98 |
+
if gid is not None:
|
| 99 |
+
tarinfo.gid = gid
|
| 100 |
+
tarinfo.gname = group
|
| 101 |
+
if uid is not None:
|
| 102 |
+
tarinfo.uid = uid
|
| 103 |
+
tarinfo.uname = owner
|
| 104 |
+
return tarinfo
|
| 105 |
+
|
| 106 |
+
if not dry_run:
|
| 107 |
+
tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
|
| 108 |
+
try:
|
| 109 |
+
tar.add(base_dir, filter=_set_uid_gid)
|
| 110 |
+
finally:
|
| 111 |
+
tar.close()
|
| 112 |
+
|
| 113 |
+
# compression using `compress`
|
| 114 |
+
if compress == 'compress':
|
| 115 |
+
warn("'compress' will be deprecated.", PendingDeprecationWarning)
|
| 116 |
+
# the option varies depending on the platform
|
| 117 |
+
compressed_name = archive_name + compress_ext[compress]
|
| 118 |
+
if sys.platform == 'win32':
|
| 119 |
+
cmd = [compress, archive_name, compressed_name]
|
| 120 |
+
else:
|
| 121 |
+
cmd = [compress, '-f', archive_name]
|
| 122 |
+
spawn(cmd, dry_run=dry_run)
|
| 123 |
+
return compressed_name
|
| 124 |
+
|
| 125 |
+
return archive_name
|
| 126 |
+
|
| 127 |
+
def make_zipfile(base_name, base_dir, verbose=0, dry_run=0):
|
| 128 |
+
"""Create a zip file from all the files under 'base_dir'.
|
| 129 |
+
|
| 130 |
+
The output zip file will be named 'base_name' + ".zip". Uses either the
|
| 131 |
+
"zipfile" Python module (if available) or the InfoZIP "zip" utility
|
| 132 |
+
(if installed and found on the default search path). If neither tool is
|
| 133 |
+
available, raises DistutilsExecError. Returns the name of the output zip
|
| 134 |
+
file.
|
| 135 |
+
"""
|
| 136 |
+
zip_filename = base_name + ".zip"
|
| 137 |
+
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
|
| 138 |
+
|
| 139 |
+
# If zipfile module is not available, try spawning an external
|
| 140 |
+
# 'zip' command.
|
| 141 |
+
if zipfile is None:
|
| 142 |
+
if verbose:
|
| 143 |
+
zipoptions = "-r"
|
| 144 |
+
else:
|
| 145 |
+
zipoptions = "-rq"
|
| 146 |
+
|
| 147 |
+
try:
|
| 148 |
+
spawn(["zip", zipoptions, zip_filename, base_dir],
|
| 149 |
+
dry_run=dry_run)
|
| 150 |
+
except DistutilsExecError:
|
| 151 |
+
# XXX really should distinguish between "couldn't find
|
| 152 |
+
# external 'zip' command" and "zip failed".
|
| 153 |
+
raise DistutilsExecError(("unable to create zip file '%s': "
|
| 154 |
+
"could neither import the 'zipfile' module nor "
|
| 155 |
+
"find a standalone zip utility") % zip_filename)
|
| 156 |
+
|
| 157 |
+
else:
|
| 158 |
+
log.info("creating '%s' and adding '%s' to it",
|
| 159 |
+
zip_filename, base_dir)
|
| 160 |
+
|
| 161 |
+
if not dry_run:
|
| 162 |
+
try:
|
| 163 |
+
zip = zipfile.ZipFile(zip_filename, "w",
|
| 164 |
+
compression=zipfile.ZIP_DEFLATED)
|
| 165 |
+
except RuntimeError:
|
| 166 |
+
zip = zipfile.ZipFile(zip_filename, "w",
|
| 167 |
+
compression=zipfile.ZIP_STORED)
|
| 168 |
+
|
| 169 |
+
with zip:
|
| 170 |
+
if base_dir != os.curdir:
|
| 171 |
+
path = os.path.normpath(os.path.join(base_dir, ''))
|
| 172 |
+
zip.write(path, path)
|
| 173 |
+
log.info("adding '%s'", path)
|
| 174 |
+
for dirpath, dirnames, filenames in os.walk(base_dir):
|
| 175 |
+
for name in dirnames:
|
| 176 |
+
path = os.path.normpath(os.path.join(dirpath, name, ''))
|
| 177 |
+
zip.write(path, path)
|
| 178 |
+
log.info("adding '%s'", path)
|
| 179 |
+
for name in filenames:
|
| 180 |
+
path = os.path.normpath(os.path.join(dirpath, name))
|
| 181 |
+
if os.path.isfile(path):
|
| 182 |
+
zip.write(path, path)
|
| 183 |
+
log.info("adding '%s'", path)
|
| 184 |
+
|
| 185 |
+
return zip_filename
|
| 186 |
+
|
| 187 |
+
ARCHIVE_FORMATS = {
|
| 188 |
+
'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
|
| 189 |
+
'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
|
| 190 |
+
'xztar': (make_tarball, [('compress', 'xz')], "xz'ed tar-file"),
|
| 191 |
+
'ztar': (make_tarball, [('compress', 'compress')], "compressed tar file"),
|
| 192 |
+
'tar': (make_tarball, [('compress', None)], "uncompressed tar file"),
|
| 193 |
+
'zip': (make_zipfile, [],"ZIP file")
|
| 194 |
+
}
|
| 195 |
+
|
| 196 |
+
def check_archive_formats(formats):
|
| 197 |
+
"""Returns the first format from the 'format' list that is unknown.
|
| 198 |
+
|
| 199 |
+
If all formats are known, returns None
|
| 200 |
+
"""
|
| 201 |
+
for format in formats:
|
| 202 |
+
if format not in ARCHIVE_FORMATS:
|
| 203 |
+
return format
|
| 204 |
+
return None
|
| 205 |
+
|
| 206 |
+
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
                 dry_run=0, owner=None, group=None):
    """Create an archive file (eg. zip or tar) and return its name.

    'base_name' is the archive name minus any format-specific extension;
    'format' is one of "zip", "tar", "gztar", "bztar", "xztar", or "ztar".

    'root_dir' is the directory the process chdirs into before archiving
    (the archive root); 'base_dir' is the directory archiving starts from,
    i.e. the common prefix of everything stored in the archive.  Both
    default to the current directory.

    'owner' and 'group' apply only to tar archives; by default the current
    owner and group are used.  Raises ValueError for an unknown 'format'.
    """
    original_cwd = os.getcwd()
    if root_dir is not None:
        log.debug("changing into '%s'", root_dir)
        # Anchor the output path before leaving the current directory,
        # otherwise a relative 'base_name' would land inside 'root_dir'.
        base_name = os.path.abspath(base_name)
        if not dry_run:
            os.chdir(root_dir)

    base_dir = os.curdir if base_dir is None else base_dir

    try:
        maker, extra_args = ARCHIVE_FORMATS[format][:2]
    except KeyError:
        raise ValueError("unknown archive format '%s'" % format)

    call_kwargs = {'dry_run': dry_run}
    # Fold in the format's registered (arg, value) pairs.
    call_kwargs.update(extra_args)

    # Ownership options are meaningful only for tar-based formats.
    if format != 'zip':
        call_kwargs['owner'] = owner
        call_kwargs['group'] = group

    try:
        filename = maker(base_name, base_dir, **call_kwargs)
    finally:
        # Always restore the working directory, even if archiving failed.
        if root_dir is not None:
            log.debug("changing back to '%s'", original_cwd)
            os.chdir(original_cwd)

    return filename
|
evalkit_llava/lib/python3.10/distutils/cmd.py
ADDED
|
@@ -0,0 +1,403 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""distutils.cmd
|
| 2 |
+
|
| 3 |
+
Provides the Command class, the base class for the command classes
|
| 4 |
+
in the distutils.command package.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import sys, os, re
|
| 8 |
+
from distutils.errors import DistutilsOptionError
|
| 9 |
+
from distutils import util, dir_util, file_util, archive_util, dep_util
|
| 10 |
+
from distutils import log
|
| 11 |
+
|
| 12 |
+
class Command:
    """Abstract base class for defining command classes, the "worker bees"
    of the Distutils.  A useful analogy for command classes is to think of
    them as subroutines with local variables called "options".  The options
    are "declared" in 'initialize_options()' and "defined" (given their
    final values, aka "finalized") in 'finalize_options()', both of which
    must be defined by every command class.  The distinction between the
    two is necessary because option values might come from the outside
    world (command line, config file, ...), and any options dependent on
    other options must be computed *after* these outside influences have
    been processed -- hence 'finalize_options()'.  The "body" of the
    subroutine, where it does all its work based on the values of its
    options, is the 'run()' method, which must also be implemented by every
    command class.
    """

    # 'sub_commands' formalizes the notion of a "family" of commands,
    # eg. "install" as the parent with sub-commands "install_lib",
    # "install_headers", etc.  The parent of a family of commands
    # defines 'sub_commands' as a class attribute; it's a list of
    #    (command_name : string, predicate : unbound_method | string | None)
    # tuples, where 'predicate' is a method of the parent command that
    # determines whether the corresponding command is applicable in the
    # current situation.  (Eg. we "install_headers" is only applicable if
    # we have any C header files to install.)  If 'predicate' is None,
    # that command is always applicable.
    #
    # 'sub_commands' is usually defined at the *end* of a class, because
    # predicates can be unbound methods, so they must already have been
    # defined.  The canonical example is the "install" command.
    sub_commands = []


    # -- Creation/initialization methods -------------------------------

    def __init__(self, dist):
        """Create and initialize a new Command object.  Most importantly,
        invokes the 'initialize_options()' method, which is the real
        initializer and depends on the actual command being
        instantiated.

        Raises TypeError if 'dist' is not a Distribution, and RuntimeError
        if instantiated directly (Command is abstract).
        """
        # late import because of mutual dependence between these classes
        from distutils.dist import Distribution

        if not isinstance(dist, Distribution):
            raise TypeError("dist must be a Distribution instance")
        if self.__class__ is Command:
            raise RuntimeError("Command is an abstract class")

        self.distribution = dist
        self.initialize_options()

        # Per-command versions of the global flags, so that the user can
        # customize Distutils' behaviour command-by-command and let some
        # commands fall back on the Distribution's behaviour.  None means
        # "not defined, check self.distribution's copy", while 0 or 1 mean
        # false and true (duh).  Note that this means figuring out the real
        # value of each flag is a touch complicated -- hence "self._dry_run"
        # will be handled by __getattr__, below.
        # XXX This needs to be fixed.
        self._dry_run = None

        # verbose is largely ignored, but needs to be set for
        # backwards compatibility (I think)?
        self.verbose = dist.verbose

        # Some commands define a 'self.force' option to ignore file
        # timestamps, but methods defined *here* assume that
        # 'self.force' exists for all commands.  So define it here
        # just to be safe.
        self.force = None

        # The 'help' flag is just used for command-line parsing, so
        # none of that complicated bureaucracy is needed.
        self.help = 0

        # 'finalized' records whether or not 'finalize_options()' has been
        # called.  'finalize_options()' itself should not pay attention to
        # this flag: it is the business of 'ensure_finalized()', which
        # always calls 'finalize_options()', to respect/update it.
        self.finalized = 0

    # XXX A more explicit way to customize dry_run would be better.
    def __getattr__(self, attr):
        # Resolve 'dry_run' lazily: the per-command '_dry_run' wins when
        # set, otherwise fall back to the Distribution's value.
        if attr == 'dry_run':
            myval = getattr(self, "_" + attr)
            if myval is None:
                return getattr(self.distribution, attr)
            else:
                return myval
        else:
            raise AttributeError(attr)

    def ensure_finalized(self):
        # Idempotent wrapper: run 'finalize_options()' at most once.
        if not self.finalized:
            self.finalize_options()
        self.finalized = 1

    # Subclasses must define:
    #   initialize_options()
    #     provide default values for all options; may be customized by
    #     setup script, by options from config file(s), or by command-line
    #     options
    #   finalize_options()
    #     decide on the final values for all options; this is called
    #     after all possible intervention from the outside world
    #     (command-line, option file, etc.) has been processed
    #   run()
    #     run the command: do whatever it is we're here to do,
    #     controlled by the command's various option values

    def initialize_options(self):
        """Set default values for all the options that this command
        supports.  Note that these defaults may be overridden by other
        commands, by the setup script, by config files, or by the
        command-line.  Thus, this is not the place to code dependencies
        between options; generally, 'initialize_options()' implementations
        are just a bunch of "self.foo = None" assignments.

        This method must be implemented by all command classes.
        """
        raise RuntimeError("abstract method -- subclass %s must override"
                           % self.__class__)

    def finalize_options(self):
        """Set final values for all the options that this command supports.
        This is always called as late as possible, ie.  after any option
        assignments from the command-line or from other commands have been
        done.  Thus, this is the place to code option dependencies: if
        'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as
        long as 'foo' still has the same value it was assigned in
        'initialize_options()'.

        This method must be implemented by all command classes.
        """
        raise RuntimeError("abstract method -- subclass %s must override"
                           % self.__class__)


    def dump_options(self, header=None, indent=""):
        """Announce (at log.INFO level) every user option of this command
        together with its current value -- a debugging aid."""
        from distutils.fancy_getopt import longopt_xlate
        if header is None:
            header = "command options for '%s':" % self.get_command_name()
        self.announce(indent + header, level=log.INFO)
        indent = indent + "  "
        for (option, _, _) in self.user_options:
            # Command-line option names use '-', attributes use '_'.
            option = option.translate(longopt_xlate)
            if option[-1] == "=":
                option = option[:-1]
            value = getattr(self, option)
            self.announce(indent + "%s = %s" % (option, value),
                          level=log.INFO)

    def run(self):
        """A command's raison d'etre: carry out the action it exists to
        perform, controlled by the options initialized in
        'initialize_options()', customized by other commands, the setup
        script, the command-line, and config files, and finalized in
        'finalize_options()'.  All terminal output and filesystem
        interaction should be done by 'run()'.

        This method must be implemented by all command classes.
        """
        raise RuntimeError("abstract method -- subclass %s must override"
                           % self.__class__)

    def announce(self, msg, level=1):
        """If the current verbosity level is of greater than or equal to
        'level' print 'msg' to stdout.
        """
        log.log(level, msg)

    def debug_print(self, msg):
        """Print 'msg' to stdout if the global DEBUG (taken from the
        DISTUTILS_DEBUG environment variable) flag is true.
        """
        from distutils.debug import DEBUG
        if DEBUG:
            print(msg)
            sys.stdout.flush()


    # -- Option validation methods -------------------------------------
    # (these are very handy in writing the 'finalize_options()' method)
    #
    # NB. the general philosophy here is to ensure that a particular option
    # value meets certain type and value constraints.  If not, we try to
    # force it into conformance (eg. if we expect a list but have a string,
    # split the string on comma and/or whitespace).  If we can't force the
    # option into conformance, raise DistutilsOptionError.  Thus, command
    # classes need do nothing more than (eg.)
    #   self.ensure_string_list('foo')
    # and they can be guaranteed that thereafter, self.foo will be
    # a list of strings.

    def _ensure_stringlike(self, option, what, default=None):
        # Common helper: return the option value, substituting 'default'
        # for None, or raise if it is set but not a string.
        val = getattr(self, option)
        if val is None:
            setattr(self, option, default)
            return default
        elif not isinstance(val, str):
            raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
                                       % (option, what, val))
        return val

    def ensure_string(self, option, default=None):
        """Ensure that 'option' is a string; if not defined, set it to
        'default'.
        """
        self._ensure_stringlike(option, "string", default)

    def ensure_string_list(self, option):
        r"""Ensure that 'option' is a list of strings.  If 'option' is
        currently a string, we split it either on /,\s*/ or /\s+/, so
        "foo bar baz", "foo,bar,baz", and "foo,   bar baz" all become
        ["foo", "bar", "baz"].
        """
        val = getattr(self, option)
        if val is None:
            return
        elif isinstance(val, str):
            setattr(self, option, re.split(r',\s*|\s+', val))
        else:
            if isinstance(val, list):
                ok = all(isinstance(v, str) for v in val)
            else:
                ok = False
            if not ok:
                raise DistutilsOptionError(
                    "'%s' must be a list of strings (got %r)"
                    % (option, val))

    def _ensure_tested_string(self, option, tester, what, error_fmt,
                              default=None):
        # Like _ensure_stringlike, but additionally validate the value
        # with 'tester' (e.g. os.path.isfile) when it is not None.
        val = self._ensure_stringlike(option, what, default)
        if val is not None and not tester(val):
            raise DistutilsOptionError(("error in '%s' option: " + error_fmt)
                                       % (option, val))

    def ensure_filename(self, option):
        """Ensure that 'option' is the name of an existing file."""
        self._ensure_tested_string(option, os.path.isfile,
                                   "filename",
                                   "'%s' does not exist or is not a file")

    def ensure_dirname(self, option):
        """Ensure that 'option' is the name of an existing directory."""
        self._ensure_tested_string(option, os.path.isdir,
                                   "directory name",
                                   "'%s' does not exist or is not a directory")


    # -- Convenience methods for commands ------------------------------

    def get_command_name(self):
        # Commands may override their reported name via a 'command_name'
        # attribute; otherwise the class name is used.
        if hasattr(self, 'command_name'):
            return self.command_name
        else:
            return self.__class__.__name__

    def set_undefined_options(self, src_cmd, *option_pairs):
        """Set the values of any "undefined" options from corresponding
        option values in some other command object.  "Undefined" here means
        "is None", which is the convention used to indicate that an option
        has not been changed between 'initialize_options()' and
        'finalize_options()'.  Usually called from 'finalize_options()' for
        options that depend on some other command rather than another
        option of the same command.  'src_cmd' is the other command from
        which option values will be taken (a command object will be created
        for it if necessary); the remaining arguments are
        '(src_option,dst_option)' tuples which mean "take the value of
        'src_option' in the 'src_cmd' command object, and copy it to
        'dst_option' in the current command object".
        """
        # Option_pairs: list of (src_option, dst_option) tuples
        src_cmd_obj = self.distribution.get_command_obj(src_cmd)
        src_cmd_obj.ensure_finalized()
        for (src_option, dst_option) in option_pairs:
            if getattr(self, dst_option) is None:
                setattr(self, dst_option, getattr(src_cmd_obj, src_option))

    def get_finalized_command(self, command, create=1):
        """Wrapper around Distribution's 'get_command_obj()' method: find
        (create if necessary and 'create' is true) the command object for
        'command', call its 'ensure_finalized()' method, and return the
        finalized command object.
        """
        cmd_obj = self.distribution.get_command_obj(command, create)
        cmd_obj.ensure_finalized()
        return cmd_obj

    # XXX rename to 'get_reinitialized_command()'? (should do the
    # same in dist.py, if so)
    def reinitialize_command(self, command, reinit_subcommands=0):
        # Delegates to the Distribution; see dist.py for semantics.
        return self.distribution.reinitialize_command(command,
                                                      reinit_subcommands)

    def run_command(self, command):
        """Run some other command: uses the 'run_command()' method of
        Distribution, which creates and finalizes the command object if
        necessary and then invokes its 'run()' method.
        """
        self.distribution.run_command(command)

    def get_sub_commands(self):
        """Determine the sub-commands that are relevant in the current
        distribution (ie., that need to be run).  This is based on the
        'sub_commands' class attribute: each tuple in that list may include
        a method that we call to determine if the subcommand needs to be
        run for the current distribution.  Return a list of command names.
        """
        commands = []
        for (cmd_name, method) in self.sub_commands:
            if method is None or method(self):
                commands.append(cmd_name)
        return commands


    # -- External world manipulation -----------------------------------

    def warn(self, msg):
        """Log 'msg' as a warning, prefixed with this command's name."""
        log.warn("warning: %s: %s\n", self.get_command_name(), msg)

    def execute(self, func, args, msg=None, level=1):
        """Invoke 'func(*args)' respecting the dry-run flag."""
        util.execute(func, args, msg, dry_run=self.dry_run)

    def mkpath(self, name, mode=0o777):
        """Create directory 'name' (and missing ancestors) respecting
        the dry-run flag."""
        dir_util.mkpath(name, mode, dry_run=self.dry_run)

    def copy_file(self, infile, outfile, preserve_mode=1, preserve_times=1,
                  link=None, level=1):
        """Copy a file respecting verbose, dry-run and force flags.  (The
        former two default to whatever is in the Distribution object, and
        the latter defaults to false for commands that don't define it.)"""
        return file_util.copy_file(infile, outfile, preserve_mode,
                                   preserve_times, not self.force, link,
                                   dry_run=self.dry_run)

    def copy_tree(self, infile, outfile, preserve_mode=1, preserve_times=1,
                   preserve_symlinks=0, level=1):
        """Copy an entire directory tree respecting verbose, dry-run,
        and force flags.
        """
        return dir_util.copy_tree(infile, outfile, preserve_mode,
                                  preserve_times, preserve_symlinks,
                                  not self.force, dry_run=self.dry_run)

    def move_file (self, src, dst, level=1):
        """Move a file respecting dry-run flag."""
        return file_util.move_file(src, dst, dry_run=self.dry_run)

    def spawn(self, cmd, search_path=1, level=1):
        """Spawn an external command respecting dry-run flag."""
        from distutils.spawn import spawn
        spawn(cmd, search_path, dry_run=self.dry_run)

    def make_archive(self, base_name, format, root_dir=None, base_dir=None,
                     owner=None, group=None):
        """Create an archive via archive_util.make_archive, respecting
        the dry-run flag; returns the archive filename."""
        return archive_util.make_archive(base_name, format, root_dir, base_dir,
                                         dry_run=self.dry_run,
                                         owner=owner, group=group)

    def make_file(self, infiles, outfile, func, args,
                  exec_msg=None, skip_msg=None, level=1):
        """Special case of 'execute()' for operations that process one or
        more input files and generate one output file.  Works just like
        'execute()', except the operation is skipped and a different
        message printed if 'outfile' already exists and is newer than all
        files listed in 'infiles'.  If the command defined 'self.force',
        and it is true, then the command is unconditionally run -- does no
        timestamp checks.
        """
        if skip_msg is None:
            skip_msg = "skipping %s (inputs unchanged)" % outfile

        # Allow 'infiles' to be a single string
        if isinstance(infiles, str):
            infiles = (infiles,)
        elif not isinstance(infiles, (list, tuple)):
            raise TypeError(
                  "'infiles' must be a string, or a list or tuple of strings")

        if exec_msg is None:
            exec_msg = "generating %s from %s" % (outfile, ', '.join(infiles))

        # If 'outfile' must be regenerated (either because it doesn't
        # exist, is out-of-date, or the 'force' flag is true) then
        # perform the action that presumably regenerates it
        if self.force or dep_util.newer_group(infiles, outfile):
            self.execute(func, args, exec_msg, level)
        # Otherwise, print the "skip" message
        else:
            log.debug(skip_msg)
|
evalkit_llava/lib/python3.10/distutils/config.py
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""distutils.pypirc
|
| 2 |
+
|
| 3 |
+
Provides the PyPIRCCommand class, the base class for the command classes
|
| 4 |
+
that uses .pypirc in the distutils.command package.
|
| 5 |
+
"""
|
| 6 |
+
import os
|
| 7 |
+
from configparser import RawConfigParser
|
| 8 |
+
|
| 9 |
+
from distutils.cmd import Command
|
| 10 |
+
|
| 11 |
+
DEFAULT_PYPIRC = """\
|
| 12 |
+
[distutils]
|
| 13 |
+
index-servers =
|
| 14 |
+
pypi
|
| 15 |
+
|
| 16 |
+
[pypi]
|
| 17 |
+
username:%s
|
| 18 |
+
password:%s
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
class PyPIRCCommand(Command):
    """Base command that knows how to handle the .pypirc file
    """
    DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/'
    DEFAULT_REALM = 'pypi'
    # Class-level defaults; initialize_options() resets the instance copies.
    repository = None
    realm = None

    user_options = [
        ('repository=', 'r',
         "url of repository [default: %s]" % \
            DEFAULT_REPOSITORY),
        ('show-response', None,
         'display full response text from server')]

    boolean_options = ['show-response']

    def _get_rc_file(self):
        """Returns rc file path."""
        return os.path.join(os.path.expanduser('~'), '.pypirc')

    def _store_pypirc(self, username, password):
        """Creates a default .pypirc file."""
        rc = self._get_rc_file()
        # 0o600: the file holds credentials, keep it owner-readable only.
        with os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f:
            f.write(DEFAULT_PYPIRC % (username, password))

    def _read_pypirc(self):
        """Reads the .pypirc file.

        Returns a dict with 'server', 'username', 'password', 'repository'
        and 'realm' keys for the matching server section, or an empty dict
        when the file is absent or has no usable configuration.  Supports
        both the modern [distutils]/index-servers layout and the legacy
        [server-login] format.
        """
        rc = self._get_rc_file()
        if os.path.exists(rc):
            self.announce('Using PyPI login from %s' % rc)
            repository = self.repository or self.DEFAULT_REPOSITORY

            config = RawConfigParser()
            config.read(rc)
            sections = config.sections()
            if 'distutils' in sections:
                # let's get the list of servers
                index_servers = config.get('distutils', 'index-servers')
                _servers = [server.strip() for server in
                            index_servers.split('\n')
                            if server.strip() != '']
                if _servers == []:
                    # nothing set, let's try to get the default pypi
                    if 'pypi' in sections:
                        _servers = ['pypi']
                    else:
                        # the file is not properly defined, returning
                        # an empty dict
                        return {}
                for server in _servers:
                    current = {'server': server}
                    current['username'] = config.get(server, 'username')

                    # optional params
                    for key, default in (('repository',
                                          self.DEFAULT_REPOSITORY),
                                         ('realm', self.DEFAULT_REALM),
                                         ('password', None)):
                        if config.has_option(server, key):
                            current[key] = config.get(server, key)
                        else:
                            current[key] = default

                    # work around people having "repository" for the "pypi"
                    # section of their config set to the HTTP (rather than
                    # HTTPS) URL
                    if (server == 'pypi' and
                        repository in (self.DEFAULT_REPOSITORY, 'pypi')):
                        current['repository'] = self.DEFAULT_REPOSITORY
                        return current

                    if (current['server'] == repository or
                        current['repository'] == repository):
                        return current
            elif 'server-login' in sections:
                # old format
                server = 'server-login'
                if config.has_option(server, 'repository'):
                    repository = config.get(server, 'repository')
                else:
                    repository = self.DEFAULT_REPOSITORY
                return {'username': config.get(server, 'username'),
                        'password': config.get(server, 'password'),
                        'repository': repository,
                        'server': server,
                        'realm': self.DEFAULT_REALM}

        return {}

    def _read_pypi_response(self, response):
        """Read and decode a PyPI HTTP response."""
        import cgi
        content_type = response.getheader('content-type', 'text/plain')
        # Fall back to ascii when the server declares no charset.
        encoding = cgi.parse_header(content_type)[1].get('charset', 'ascii')
        return response.read().decode(encoding)

    def initialize_options(self):
        """Initialize options."""
        self.repository = None
        self.realm = None
        self.show_response = 0

    def finalize_options(self):
        """Finalizes options."""
        if self.repository is None:
            self.repository = self.DEFAULT_REPOSITORY
        if self.realm is None:
            self.realm = self.DEFAULT_REALM
|
evalkit_llava/lib/python3.10/distutils/debug.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os

# Debug switch for the whole Distutils package: truthy whenever the
# DISTUTILS_DEBUG environment variable is set to a non-empty string,
# and None (falsy) otherwise.
DEBUG = os.getenv('DISTUTILS_DEBUG')
|
evalkit_llava/lib/python3.10/distutils/dep_util.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""distutils.dep_util
|
| 2 |
+
|
| 3 |
+
Utility functions for simple, timestamp-based dependency of files
|
| 4 |
+
and groups of files; also, function based entirely on such
|
| 5 |
+
timestamp dependency analysis."""
|
| 6 |
+
|
| 7 |
+
import os
|
| 8 |
+
from distutils.errors import DistutilsFileError
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def newer(source, target):
    """Return true when 'source' is more recently modified than 'target',
    or when 'source' exists and 'target' does not.  Return false when both
    exist and 'target' is at least as recent as 'source'.

    Raises DistutilsFileError if 'source' does not exist.
    """
    if not os.path.exists(source):
        raise DistutilsFileError("file '%s' does not exist" %
                                 os.path.abspath(source))
    if not os.path.exists(target):
        return 1

    from stat import ST_MTIME
    # Integer mtimes via ST_MTIME indexing, matching the rest of distutils.
    return os.stat(source)[ST_MTIME] > os.stat(target)[ST_MTIME]
|
| 28 |
+
|
| 29 |
+
# newer ()
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def newer_pairwise(sources, targets):
    """Walk two equal-length filename lists in lockstep and keep only the
    pairs where the source is newer than its target (per 'newer()').

    Returns a pair of lists: the selected sources and the selected targets.
    Raises ValueError if the input lists differ in length.
    """
    if len(sources) != len(targets):
        raise ValueError("'sources' and 'targets' must be same length")

    # Keep only the (source, target) pairs that are out of date.
    stale = [(src, tgt) for src, tgt in zip(sources, targets)
             if newer(src, tgt)]
    return ([src for src, _ in stale], [tgt for _, tgt in stale])
|
| 50 |
+
|
| 51 |
+
# newer_pairwise ()
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def newer_group (sources, target, missing='error'):
    """Return true if 'target' is out-of-date with respect to any file
    listed in 'sources'; i.e. false only when 'target' exists and is newer
    than every source.  'missing' selects the treatment of absent source
    files: "error" (default) lets the underlying 'stat()' raise OSError;
    "ignore" silently skips the missing source; "newer" treats any missing
    source as making 'target' out-of-date (useful for dry runs, where the
    commands that would need the missing inputs are never executed).
    """
    # A target that doesn't exist yet is always out-of-date.
    if not os.path.exists(target):
        return 1

    # Compare every source's mtime against the target's; the first source
    # that is more recent settles the question.
    from stat import ST_MTIME
    target_mtime = os.stat(target)[ST_MTIME]
    for source in sources:
        if not os.path.exists(source):
            if missing == 'ignore':
                # drop missing source from target's dependency list
                continue
            elif missing == 'newer':
                # missing source means target is out-of-date
                return 1
            # missing == 'error': fall through and let os.stat() blow up

        if os.stat(source)[ST_MTIME] > target_mtime:
            return 1

    return 0

# newer_group ()
|
evalkit_llava/lib/python3.10/distutils/dist.py
ADDED
|
@@ -0,0 +1,1256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""distutils.dist
|
| 2 |
+
|
| 3 |
+
Provides the Distribution class, which represents the module distribution
|
| 4 |
+
being built/installed/distributed.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import sys
|
| 8 |
+
import os
|
| 9 |
+
import re
|
| 10 |
+
from email import message_from_file
|
| 11 |
+
|
| 12 |
+
try:
|
| 13 |
+
import warnings
|
| 14 |
+
except ImportError:
|
| 15 |
+
warnings = None
|
| 16 |
+
|
| 17 |
+
from distutils.errors import *
|
| 18 |
+
from distutils.fancy_getopt import FancyGetopt, translate_longopt
|
| 19 |
+
from distutils.util import check_environ, strtobool, rfc822_escape
|
| 20 |
+
from distutils import log
|
| 21 |
+
from distutils.debug import DEBUG
|
| 22 |
+
|
| 23 |
+
# Regex to define acceptable Distutils command names. This is not *quite*
|
| 24 |
+
# the same as a Python NAME -- I don't allow leading underscores. The fact
|
| 25 |
+
# that they're very similar is no coincidence; the default naming scheme is
|
| 26 |
+
# to look for a Python module named after the command.
|
| 27 |
+
command_re = re.compile(r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def _ensure_list(value, fieldname):
|
| 31 |
+
if isinstance(value, str):
|
| 32 |
+
# a string containing comma separated values is okay. It will
|
| 33 |
+
# be converted to a list by Distribution.finalize_options().
|
| 34 |
+
pass
|
| 35 |
+
elif not isinstance(value, list):
|
| 36 |
+
# passing a tuple or an iterator perhaps, warn and convert
|
| 37 |
+
typename = type(value).__name__
|
| 38 |
+
msg = f"Warning: '{fieldname}' should be a list, got type '{typename}'"
|
| 39 |
+
log.log(log.WARN, msg)
|
| 40 |
+
value = list(value)
|
| 41 |
+
return value
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class Distribution:
|
| 45 |
+
"""The core of the Distutils. Most of the work hiding behind 'setup'
|
| 46 |
+
is really done within a Distribution instance, which farms the work out
|
| 47 |
+
to the Distutils commands specified on the command line.
|
| 48 |
+
|
| 49 |
+
Setup scripts will almost never instantiate Distribution directly,
|
| 50 |
+
unless the 'setup()' function is totally inadequate to their needs.
|
| 51 |
+
However, it is conceivable that a setup script might wish to subclass
|
| 52 |
+
Distribution for some specialized purpose, and then pass the subclass
|
| 53 |
+
to 'setup()' as the 'distclass' keyword argument. If so, it is
|
| 54 |
+
necessary to respect the expectations that 'setup' has of Distribution.
|
| 55 |
+
See the code for 'setup()', in core.py, for details.
|
| 56 |
+
"""
|
| 57 |
+
|
| 58 |
+
# 'global_options' describes the command-line options that may be
|
| 59 |
+
# supplied to the setup script prior to any actual commands.
|
| 60 |
+
# Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of
|
| 61 |
+
# these global options. This list should be kept to a bare minimum,
|
| 62 |
+
# since every global option is also valid as a command option -- and we
|
| 63 |
+
# don't want to pollute the commands with too many options that they
|
| 64 |
+
# have minimal control over.
|
| 65 |
+
# The fourth entry for verbose means that it can be repeated.
|
| 66 |
+
global_options = [
|
| 67 |
+
('verbose', 'v', "run verbosely (default)", 1),
|
| 68 |
+
('quiet', 'q', "run quietly (turns verbosity off)"),
|
| 69 |
+
('dry-run', 'n', "don't actually do anything"),
|
| 70 |
+
('help', 'h', "show detailed help message"),
|
| 71 |
+
('no-user-cfg', None,
|
| 72 |
+
'ignore pydistutils.cfg in your home directory'),
|
| 73 |
+
]
|
| 74 |
+
|
| 75 |
+
# 'common_usage' is a short (2-3 line) string describing the common
|
| 76 |
+
# usage of the setup script.
|
| 77 |
+
common_usage = """\
|
| 78 |
+
Common commands: (see '--help-commands' for more)
|
| 79 |
+
|
| 80 |
+
setup.py build will build the package underneath 'build/'
|
| 81 |
+
setup.py install will install the package
|
| 82 |
+
"""
|
| 83 |
+
|
| 84 |
+
# options that are not propagated to the commands
|
| 85 |
+
display_options = [
|
| 86 |
+
('help-commands', None,
|
| 87 |
+
"list all available commands"),
|
| 88 |
+
('name', None,
|
| 89 |
+
"print package name"),
|
| 90 |
+
('version', 'V',
|
| 91 |
+
"print package version"),
|
| 92 |
+
('fullname', None,
|
| 93 |
+
"print <package name>-<version>"),
|
| 94 |
+
('author', None,
|
| 95 |
+
"print the author's name"),
|
| 96 |
+
('author-email', None,
|
| 97 |
+
"print the author's email address"),
|
| 98 |
+
('maintainer', None,
|
| 99 |
+
"print the maintainer's name"),
|
| 100 |
+
('maintainer-email', None,
|
| 101 |
+
"print the maintainer's email address"),
|
| 102 |
+
('contact', None,
|
| 103 |
+
"print the maintainer's name if known, else the author's"),
|
| 104 |
+
('contact-email', None,
|
| 105 |
+
"print the maintainer's email address if known, else the author's"),
|
| 106 |
+
('url', None,
|
| 107 |
+
"print the URL for this package"),
|
| 108 |
+
('license', None,
|
| 109 |
+
"print the license of the package"),
|
| 110 |
+
('licence', None,
|
| 111 |
+
"alias for --license"),
|
| 112 |
+
('description', None,
|
| 113 |
+
"print the package description"),
|
| 114 |
+
('long-description', None,
|
| 115 |
+
"print the long package description"),
|
| 116 |
+
('platforms', None,
|
| 117 |
+
"print the list of platforms"),
|
| 118 |
+
('classifiers', None,
|
| 119 |
+
"print the list of classifiers"),
|
| 120 |
+
('keywords', None,
|
| 121 |
+
"print the list of keywords"),
|
| 122 |
+
('provides', None,
|
| 123 |
+
"print the list of packages/modules provided"),
|
| 124 |
+
('requires', None,
|
| 125 |
+
"print the list of packages/modules required"),
|
| 126 |
+
('obsoletes', None,
|
| 127 |
+
"print the list of packages/modules made obsolete")
|
| 128 |
+
]
|
| 129 |
+
display_option_names = [translate_longopt(x[0]) for x in display_options]
|
| 130 |
+
|
| 131 |
+
# negative options are options that exclude other options
|
| 132 |
+
negative_opt = {'quiet': 'verbose'}
|
| 133 |
+
|
| 134 |
+
# -- Creation/initialization methods -------------------------------
|
| 135 |
+
|
| 136 |
+
def __init__(self, attrs=None):
|
| 137 |
+
"""Construct a new Distribution instance: initialize all the
|
| 138 |
+
attributes of a Distribution, and then use 'attrs' (a dictionary
|
| 139 |
+
mapping attribute names to values) to assign some of those
|
| 140 |
+
attributes their "real" values. (Any attributes not mentioned in
|
| 141 |
+
'attrs' will be assigned to some null value: 0, None, an empty list
|
| 142 |
+
or dictionary, etc.) Most importantly, initialize the
|
| 143 |
+
'command_obj' attribute to the empty dictionary; this will be
|
| 144 |
+
filled in with real command objects by 'parse_command_line()'.
|
| 145 |
+
"""
|
| 146 |
+
|
| 147 |
+
# Default values for our command-line options
|
| 148 |
+
self.verbose = 1
|
| 149 |
+
self.dry_run = 0
|
| 150 |
+
self.help = 0
|
| 151 |
+
for attr in self.display_option_names:
|
| 152 |
+
setattr(self, attr, 0)
|
| 153 |
+
|
| 154 |
+
# Store the distribution meta-data (name, version, author, and so
|
| 155 |
+
# forth) in a separate object -- we're getting to have enough
|
| 156 |
+
# information here (and enough command-line options) that it's
|
| 157 |
+
# worth it. Also delegate 'get_XXX()' methods to the 'metadata'
|
| 158 |
+
# object in a sneaky and underhanded (but efficient!) way.
|
| 159 |
+
self.metadata = DistributionMetadata()
|
| 160 |
+
for basename in self.metadata._METHOD_BASENAMES:
|
| 161 |
+
method_name = "get_" + basename
|
| 162 |
+
setattr(self, method_name, getattr(self.metadata, method_name))
|
| 163 |
+
|
| 164 |
+
# 'cmdclass' maps command names to class objects, so we
|
| 165 |
+
# can 1) quickly figure out which class to instantiate when
|
| 166 |
+
# we need to create a new command object, and 2) have a way
|
| 167 |
+
# for the setup script to override command classes
|
| 168 |
+
self.cmdclass = {}
|
| 169 |
+
|
| 170 |
+
# 'command_packages' is a list of packages in which commands
|
| 171 |
+
# are searched for. The factory for command 'foo' is expected
|
| 172 |
+
# to be named 'foo' in the module 'foo' in one of the packages
|
| 173 |
+
# named here. This list is searched from the left; an error
|
| 174 |
+
# is raised if no named package provides the command being
|
| 175 |
+
# searched for. (Always access using get_command_packages().)
|
| 176 |
+
self.command_packages = None
|
| 177 |
+
|
| 178 |
+
# 'script_name' and 'script_args' are usually set to sys.argv[0]
|
| 179 |
+
# and sys.argv[1:], but they can be overridden when the caller is
|
| 180 |
+
# not necessarily a setup script run from the command-line.
|
| 181 |
+
self.script_name = None
|
| 182 |
+
self.script_args = None
|
| 183 |
+
|
| 184 |
+
# 'command_options' is where we store command options between
|
| 185 |
+
# parsing them (from config files, the command-line, etc.) and when
|
| 186 |
+
# they are actually needed -- ie. when the command in question is
|
| 187 |
+
# instantiated. It is a dictionary of dictionaries of 2-tuples:
|
| 188 |
+
# command_options = { command_name : { option : (source, value) } }
|
| 189 |
+
self.command_options = {}
|
| 190 |
+
|
| 191 |
+
# 'dist_files' is the list of (command, pyversion, file) that
|
| 192 |
+
# have been created by any dist commands run so far. This is
|
| 193 |
+
# filled regardless of whether the run is dry or not. pyversion
|
| 194 |
+
# gives sysconfig.get_python_version() if the dist file is
|
| 195 |
+
# specific to a Python version, 'any' if it is good for all
|
| 196 |
+
# Python versions on the target platform, and '' for a source
|
| 197 |
+
# file. pyversion should not be used to specify minimum or
|
| 198 |
+
# maximum required Python versions; use the metainfo for that
|
| 199 |
+
# instead.
|
| 200 |
+
self.dist_files = []
|
| 201 |
+
|
| 202 |
+
# These options are really the business of various commands, rather
|
| 203 |
+
# than of the Distribution itself. We provide aliases for them in
|
| 204 |
+
# Distribution as a convenience to the developer.
|
| 205 |
+
self.packages = None
|
| 206 |
+
self.package_data = {}
|
| 207 |
+
self.package_dir = None
|
| 208 |
+
self.py_modules = None
|
| 209 |
+
self.libraries = None
|
| 210 |
+
self.headers = None
|
| 211 |
+
self.ext_modules = None
|
| 212 |
+
self.ext_package = None
|
| 213 |
+
self.include_dirs = None
|
| 214 |
+
self.extra_path = None
|
| 215 |
+
self.scripts = None
|
| 216 |
+
self.data_files = None
|
| 217 |
+
self.password = ''
|
| 218 |
+
|
| 219 |
+
# And now initialize bookkeeping stuff that can't be supplied by
|
| 220 |
+
# the caller at all. 'command_obj' maps command names to
|
| 221 |
+
# Command instances -- that's how we enforce that every command
|
| 222 |
+
# class is a singleton.
|
| 223 |
+
self.command_obj = {}
|
| 224 |
+
|
| 225 |
+
# 'have_run' maps command names to boolean values; it keeps track
|
| 226 |
+
# of whether we have actually run a particular command, to make it
|
| 227 |
+
# cheap to "run" a command whenever we think we might need to -- if
|
| 228 |
+
# it's already been done, no need for expensive filesystem
|
| 229 |
+
# operations, we just check the 'have_run' dictionary and carry on.
|
| 230 |
+
# It's only safe to query 'have_run' for a command class that has
|
| 231 |
+
# been instantiated -- a false value will be inserted when the
|
| 232 |
+
# command object is created, and replaced with a true value when
|
| 233 |
+
# the command is successfully run. Thus it's probably best to use
|
| 234 |
+
# '.get()' rather than a straight lookup.
|
| 235 |
+
self.have_run = {}
|
| 236 |
+
|
| 237 |
+
# Now we'll use the attrs dictionary (ultimately, keyword args from
|
| 238 |
+
# the setup script) to possibly override any or all of these
|
| 239 |
+
# distribution options.
|
| 240 |
+
|
| 241 |
+
if attrs:
|
| 242 |
+
# Pull out the set of command options and work on them
|
| 243 |
+
# specifically. Note that this order guarantees that aliased
|
| 244 |
+
# command options will override any supplied redundantly
|
| 245 |
+
# through the general options dictionary.
|
| 246 |
+
options = attrs.get('options')
|
| 247 |
+
if options is not None:
|
| 248 |
+
del attrs['options']
|
| 249 |
+
for (command, cmd_options) in options.items():
|
| 250 |
+
opt_dict = self.get_option_dict(command)
|
| 251 |
+
for (opt, val) in cmd_options.items():
|
| 252 |
+
opt_dict[opt] = ("setup script", val)
|
| 253 |
+
|
| 254 |
+
if 'licence' in attrs:
|
| 255 |
+
attrs['license'] = attrs['licence']
|
| 256 |
+
del attrs['licence']
|
| 257 |
+
msg = "'licence' distribution option is deprecated; use 'license'"
|
| 258 |
+
if warnings is not None:
|
| 259 |
+
warnings.warn(msg)
|
| 260 |
+
else:
|
| 261 |
+
sys.stderr.write(msg + "\n")
|
| 262 |
+
|
| 263 |
+
# Now work on the rest of the attributes. Any attribute that's
|
| 264 |
+
# not already defined is invalid!
|
| 265 |
+
for (key, val) in attrs.items():
|
| 266 |
+
if hasattr(self.metadata, "set_" + key):
|
| 267 |
+
getattr(self.metadata, "set_" + key)(val)
|
| 268 |
+
elif hasattr(self.metadata, key):
|
| 269 |
+
setattr(self.metadata, key, val)
|
| 270 |
+
elif hasattr(self, key):
|
| 271 |
+
setattr(self, key, val)
|
| 272 |
+
else:
|
| 273 |
+
msg = "Unknown distribution option: %s" % repr(key)
|
| 274 |
+
warnings.warn(msg)
|
| 275 |
+
|
| 276 |
+
# no-user-cfg is handled before other command line args
|
| 277 |
+
# because other args override the config files, and this
|
| 278 |
+
# one is needed before we can load the config files.
|
| 279 |
+
# If attrs['script_args'] wasn't passed, assume false.
|
| 280 |
+
#
|
| 281 |
+
# This also make sure we just look at the global options
|
| 282 |
+
self.want_user_cfg = True
|
| 283 |
+
|
| 284 |
+
if self.script_args is not None:
|
| 285 |
+
for arg in self.script_args:
|
| 286 |
+
if not arg.startswith('-'):
|
| 287 |
+
break
|
| 288 |
+
if arg == '--no-user-cfg':
|
| 289 |
+
self.want_user_cfg = False
|
| 290 |
+
break
|
| 291 |
+
|
| 292 |
+
self.finalize_options()
|
| 293 |
+
|
| 294 |
+
def get_option_dict(self, command):
|
| 295 |
+
"""Get the option dictionary for a given command. If that
|
| 296 |
+
command's option dictionary hasn't been created yet, then create it
|
| 297 |
+
and return the new dictionary; otherwise, return the existing
|
| 298 |
+
option dictionary.
|
| 299 |
+
"""
|
| 300 |
+
dict = self.command_options.get(command)
|
| 301 |
+
if dict is None:
|
| 302 |
+
dict = self.command_options[command] = {}
|
| 303 |
+
return dict
|
| 304 |
+
|
| 305 |
+
def dump_option_dicts(self, header=None, commands=None, indent=""):
|
| 306 |
+
from pprint import pformat
|
| 307 |
+
|
| 308 |
+
if commands is None: # dump all command option dicts
|
| 309 |
+
commands = sorted(self.command_options.keys())
|
| 310 |
+
|
| 311 |
+
if header is not None:
|
| 312 |
+
self.announce(indent + header)
|
| 313 |
+
indent = indent + " "
|
| 314 |
+
|
| 315 |
+
if not commands:
|
| 316 |
+
self.announce(indent + "no commands known yet")
|
| 317 |
+
return
|
| 318 |
+
|
| 319 |
+
for cmd_name in commands:
|
| 320 |
+
opt_dict = self.command_options.get(cmd_name)
|
| 321 |
+
if opt_dict is None:
|
| 322 |
+
self.announce(indent +
|
| 323 |
+
"no option dict for '%s' command" % cmd_name)
|
| 324 |
+
else:
|
| 325 |
+
self.announce(indent +
|
| 326 |
+
"option dict for '%s' command:" % cmd_name)
|
| 327 |
+
out = pformat(opt_dict)
|
| 328 |
+
for line in out.split('\n'):
|
| 329 |
+
self.announce(indent + " " + line)
|
| 330 |
+
|
| 331 |
+
# -- Config file finding/parsing methods ---------------------------
|
| 332 |
+
|
| 333 |
+
def find_config_files(self):
|
| 334 |
+
"""Find as many configuration files as should be processed for this
|
| 335 |
+
platform, and return a list of filenames in the order in which they
|
| 336 |
+
should be parsed. The filenames returned are guaranteed to exist
|
| 337 |
+
(modulo nasty race conditions).
|
| 338 |
+
|
| 339 |
+
There are three possible config files: distutils.cfg in the
|
| 340 |
+
Distutils installation directory (ie. where the top-level
|
| 341 |
+
Distutils __inst__.py file lives), a file in the user's home
|
| 342 |
+
directory named .pydistutils.cfg on Unix and pydistutils.cfg
|
| 343 |
+
on Windows/Mac; and setup.cfg in the current directory.
|
| 344 |
+
|
| 345 |
+
The file in the user's home directory can be disabled with the
|
| 346 |
+
--no-user-cfg option.
|
| 347 |
+
"""
|
| 348 |
+
files = []
|
| 349 |
+
check_environ()
|
| 350 |
+
|
| 351 |
+
# Where to look for the system-wide Distutils config file
|
| 352 |
+
sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
|
| 353 |
+
|
| 354 |
+
# Look for the system config file
|
| 355 |
+
sys_file = os.path.join(sys_dir, "distutils.cfg")
|
| 356 |
+
if os.path.isfile(sys_file):
|
| 357 |
+
files.append(sys_file)
|
| 358 |
+
|
| 359 |
+
# What to call the per-user config file
|
| 360 |
+
if os.name == 'posix':
|
| 361 |
+
user_filename = ".pydistutils.cfg"
|
| 362 |
+
else:
|
| 363 |
+
user_filename = "pydistutils.cfg"
|
| 364 |
+
|
| 365 |
+
# And look for the user config file
|
| 366 |
+
if self.want_user_cfg:
|
| 367 |
+
user_file = os.path.join(os.path.expanduser('~'), user_filename)
|
| 368 |
+
if os.path.isfile(user_file):
|
| 369 |
+
files.append(user_file)
|
| 370 |
+
|
| 371 |
+
# All platforms support local setup.cfg
|
| 372 |
+
local_file = "setup.cfg"
|
| 373 |
+
if os.path.isfile(local_file):
|
| 374 |
+
files.append(local_file)
|
| 375 |
+
|
| 376 |
+
if DEBUG:
|
| 377 |
+
self.announce("using config files: %s" % ', '.join(files))
|
| 378 |
+
|
| 379 |
+
return files
|
| 380 |
+
|
| 381 |
+
def parse_config_files(self, filenames=None):
|
| 382 |
+
from configparser import ConfigParser
|
| 383 |
+
|
| 384 |
+
# Ignore install directory options if we have a venv
|
| 385 |
+
if sys.prefix != sys.base_prefix:
|
| 386 |
+
ignore_options = [
|
| 387 |
+
'install-base', 'install-platbase', 'install-lib',
|
| 388 |
+
'install-platlib', 'install-purelib', 'install-headers',
|
| 389 |
+
'install-scripts', 'install-data', 'prefix', 'exec-prefix',
|
| 390 |
+
'home', 'user', 'root']
|
| 391 |
+
else:
|
| 392 |
+
ignore_options = []
|
| 393 |
+
|
| 394 |
+
ignore_options = frozenset(ignore_options)
|
| 395 |
+
|
| 396 |
+
if filenames is None:
|
| 397 |
+
filenames = self.find_config_files()
|
| 398 |
+
|
| 399 |
+
if DEBUG:
|
| 400 |
+
self.announce("Distribution.parse_config_files():")
|
| 401 |
+
|
| 402 |
+
parser = ConfigParser()
|
| 403 |
+
for filename in filenames:
|
| 404 |
+
if DEBUG:
|
| 405 |
+
self.announce(" reading %s" % filename)
|
| 406 |
+
parser.read(filename)
|
| 407 |
+
for section in parser.sections():
|
| 408 |
+
options = parser.options(section)
|
| 409 |
+
opt_dict = self.get_option_dict(section)
|
| 410 |
+
|
| 411 |
+
for opt in options:
|
| 412 |
+
if opt != '__name__' and opt not in ignore_options:
|
| 413 |
+
val = parser.get(section,opt)
|
| 414 |
+
opt = opt.replace('-', '_')
|
| 415 |
+
opt_dict[opt] = (filename, val)
|
| 416 |
+
|
| 417 |
+
# Make the ConfigParser forget everything (so we retain
|
| 418 |
+
# the original filenames that options come from)
|
| 419 |
+
parser.__init__()
|
| 420 |
+
|
| 421 |
+
# If there was a "global" section in the config file, use it
|
| 422 |
+
# to set Distribution options.
|
| 423 |
+
|
| 424 |
+
if 'global' in self.command_options:
|
| 425 |
+
for (opt, (src, val)) in self.command_options['global'].items():
|
| 426 |
+
alias = self.negative_opt.get(opt)
|
| 427 |
+
try:
|
| 428 |
+
if alias:
|
| 429 |
+
setattr(self, alias, not strtobool(val))
|
| 430 |
+
elif opt in ('verbose', 'dry_run'): # ugh!
|
| 431 |
+
setattr(self, opt, strtobool(val))
|
| 432 |
+
else:
|
| 433 |
+
setattr(self, opt, val)
|
| 434 |
+
except ValueError as msg:
|
| 435 |
+
raise DistutilsOptionError(msg)
|
| 436 |
+
|
| 437 |
+
# -- Command-line parsing methods ----------------------------------
|
| 438 |
+
|
| 439 |
+
def parse_command_line(self):
|
| 440 |
+
"""Parse the setup script's command line, taken from the
|
| 441 |
+
'script_args' instance attribute (which defaults to 'sys.argv[1:]'
|
| 442 |
+
-- see 'setup()' in core.py). This list is first processed for
|
| 443 |
+
"global options" -- options that set attributes of the Distribution
|
| 444 |
+
instance. Then, it is alternately scanned for Distutils commands
|
| 445 |
+
and options for that command. Each new command terminates the
|
| 446 |
+
options for the previous command. The allowed options for a
|
| 447 |
+
command are determined by the 'user_options' attribute of the
|
| 448 |
+
command class -- thus, we have to be able to load command classes
|
| 449 |
+
in order to parse the command line. Any error in that 'options'
|
| 450 |
+
attribute raises DistutilsGetoptError; any error on the
|
| 451 |
+
command-line raises DistutilsArgError. If no Distutils commands
|
| 452 |
+
were found on the command line, raises DistutilsArgError. Return
|
| 453 |
+
true if command-line was successfully parsed and we should carry
|
| 454 |
+
on with executing commands; false if no errors but we shouldn't
|
| 455 |
+
execute commands (currently, this only happens if user asks for
|
| 456 |
+
help).
|
| 457 |
+
"""
|
| 458 |
+
#
|
| 459 |
+
# We now have enough information to show the Macintosh dialog
|
| 460 |
+
# that allows the user to interactively specify the "command line".
|
| 461 |
+
#
|
| 462 |
+
toplevel_options = self._get_toplevel_options()
|
| 463 |
+
|
| 464 |
+
# We have to parse the command line a bit at a time -- global
|
| 465 |
+
# options, then the first command, then its options, and so on --
|
| 466 |
+
# because each command will be handled by a different class, and
|
| 467 |
+
# the options that are valid for a particular class aren't known
|
| 468 |
+
# until we have loaded the command class, which doesn't happen
|
| 469 |
+
# until we know what the command is.
|
| 470 |
+
|
| 471 |
+
self.commands = []
|
| 472 |
+
parser = FancyGetopt(toplevel_options + self.display_options)
|
| 473 |
+
parser.set_negative_aliases(self.negative_opt)
|
| 474 |
+
parser.set_aliases({'licence': 'license'})
|
| 475 |
+
args = parser.getopt(args=self.script_args, object=self)
|
| 476 |
+
option_order = parser.get_option_order()
|
| 477 |
+
log.set_verbosity(self.verbose)
|
| 478 |
+
|
| 479 |
+
# for display options we return immediately
|
| 480 |
+
if self.handle_display_options(option_order):
|
| 481 |
+
return
|
| 482 |
+
while args:
|
| 483 |
+
args = self._parse_command_opts(parser, args)
|
| 484 |
+
if args is None: # user asked for help (and got it)
|
| 485 |
+
return
|
| 486 |
+
|
| 487 |
+
# Handle the cases of --help as a "global" option, ie.
|
| 488 |
+
# "setup.py --help" and "setup.py --help command ...". For the
|
| 489 |
+
# former, we show global options (--verbose, --dry-run, etc.)
|
| 490 |
+
# and display-only options (--name, --version, etc.); for the
|
| 491 |
+
# latter, we omit the display-only options and show help for
|
| 492 |
+
# each command listed on the command line.
|
| 493 |
+
if self.help:
|
| 494 |
+
self._show_help(parser,
|
| 495 |
+
display_options=len(self.commands) == 0,
|
| 496 |
+
commands=self.commands)
|
| 497 |
+
return
|
| 498 |
+
|
| 499 |
+
# Oops, no commands found -- an end-user error
|
| 500 |
+
if not self.commands:
|
| 501 |
+
raise DistutilsArgError("no commands supplied")
|
| 502 |
+
|
| 503 |
+
# All is well: return true
|
| 504 |
+
return True
|
| 505 |
+
|
| 506 |
+
def _get_toplevel_options(self):
|
| 507 |
+
"""Return the non-display options recognized at the top level.
|
| 508 |
+
|
| 509 |
+
This includes options that are recognized *only* at the top
|
| 510 |
+
level as well as options recognized for commands.
|
| 511 |
+
"""
|
| 512 |
+
return self.global_options + [
|
| 513 |
+
("command-packages=", None,
|
| 514 |
+
"list of packages that provide distutils commands"),
|
| 515 |
+
]
|
| 516 |
+
|
| 517 |
+
def _parse_command_opts(self, parser, args):
    """Parse the command-line options for a single command.

    'parser' must be a FancyGetopt instance; 'args' must be the list
    of arguments, starting with the current command (whose options
    we are about to parse).  Returns a new version of 'args' with
    the next command at the front of the list; will be the empty
    list if there are no more commands on the command line.  Returns
    None if the user asked for help on this command.

    Raises SystemExit for a syntactically invalid command name,
    DistutilsArgError if the command module cannot be loaded, and
    DistutilsClassError if the command class is malformed.
    """
    # late import because of mutual dependence between these modules
    from distutils.cmd import Command

    # Pull the current command from the head of the command line
    command = args[0]
    if not command_re.match(command):
        raise SystemExit("invalid command name '%s'" % command)
    self.commands.append(command)

    # Dig up the command class that implements this command, so we
    # 1) know that it's a valid command, and 2) know which options
    # it takes.
    try:
        cmd_class = self.get_command_class(command)
    except DistutilsModuleError as msg:
        raise DistutilsArgError(msg)

    # Require that the command class be derived from Command -- want
    # to be sure that the basic "command" interface is implemented.
    if not issubclass(cmd_class, Command):
        raise DistutilsClassError(
            "command class %s must subclass Command" % cmd_class)

    # Also make sure that the command object provides a list of its
    # known options.
    if not (hasattr(cmd_class, 'user_options') and
            isinstance(cmd_class.user_options, list)):
        msg = ("command class %s must provide "
               "'user_options' attribute (a list of tuples)")
        raise DistutilsClassError(msg % cmd_class)

    # If the command class has a list of negative alias options,
    # merge it in with the global negative aliases.  Copy first so
    # the shared global mapping is not mutated for other commands.
    negative_opt = self.negative_opt
    if hasattr(cmd_class, 'negative_opt'):
        negative_opt = negative_opt.copy()
        negative_opt.update(cmd_class.negative_opt)

    # Check for help_options in command class.  They have a different
    # format (tuple of four) so we need to preprocess them here.
    if (hasattr(cmd_class, 'help_options') and
            isinstance(cmd_class.help_options, list)):
        help_options = fix_help_options(cmd_class.help_options)
    else:
        help_options = []

    # All commands support the global options too, just by adding
    # in 'global_options'.
    parser.set_option_table(self.global_options +
                            cmd_class.user_options +
                            help_options)
    parser.set_negative_aliases(negative_opt)
    (args, opts) = parser.getopt(args[1:])
    # --help for this specific command: show its help and bail out
    # (the None return tells the caller help was handled).
    if hasattr(opts, 'help') and opts.help:
        self._show_help(parser, display_options=0, commands=[cmd_class])
        return

    # If any of the command's own "help options" (tuple-of-four form,
    # e.g. --help-formats) was given, invoke its callback and stop.
    if (hasattr(cmd_class, 'help_options') and
            isinstance(cmd_class.help_options, list)):
        help_option_found = 0
        for (help_option, short, desc, func) in cmd_class.help_options:
            if hasattr(opts, parser.get_attr_name(help_option)):
                help_option_found = 1
                if callable(func):
                    func()
                else:
                    raise DistutilsClassError(
                        "invalid help function %r for help option '%s': "
                        "must be a callable object (function, etc.)"
                        % (func, help_option))

        if help_option_found:
            return

    # Put the options from the command-line into their official
    # holding pen, the 'command_options' dictionary.  Each value is
    # tagged with its source for later error reporting.
    opt_dict = self.get_option_dict(command)
    for (name, value) in vars(opts).items():
        opt_dict[name] = ("command line", value)

    return args
|
| 607 |
+
|
| 608 |
+
def finalize_options(self):
    """Set final values for all the options on the Distribution
    instance, analogous to the .finalize_options() method of Command
    objects.
    """
    # Normalize the comma-separated metadata fields: a plain string
    # becomes a list of stripped items; an existing list (or None)
    # is left untouched.
    for field in ('keywords', 'platforms'):
        current = getattr(self.metadata, field)
        if current is None:
            continue
        if isinstance(current, str):
            items = [item.strip() for item in current.split(',')]
            setattr(self.metadata, field, items)
|
| 620 |
+
|
| 621 |
+
def _show_help(self, parser, global_options=1, display_options=1,
               commands=[]):
    """Show help for the setup script command-line in the form of
    several lists of command-line options.  'parser' should be a
    FancyGetopt instance; do not expect it to be returned in the
    same state, as its option table will be reset to make it
    generate the correct help text.

    If 'global_options' is true, lists the global options:
    --verbose, --dry-run, etc.  If 'display_options' is true, lists
    the "display-only" options: --name, --version, etc.  Finally,
    lists per-command help for every command name or command class
    in 'commands'.

    NOTE(review): the per-command loop below iterates 'self.commands',
    not the 'commands' parameter -- the parameter (and its mutable
    default) appears unused here; confirm against callers before
    relying on it.
    """
    # late import because of mutual dependence between these modules
    from distutils.core import gen_usage
    from distutils.cmd import Command

    if global_options:
        # When display options are also being shown, include the
        # top-level-only options (e.g. --command-packages).
        if display_options:
            options = self._get_toplevel_options()
        else:
            options = self.global_options
        parser.set_option_table(options)
        parser.print_help(self.common_usage + "\nGlobal options:")
        print('')

    if display_options:
        parser.set_option_table(self.display_options)
        parser.print_help(
            "Information display options (just display " +
            "information, ignore any commands)")
        print('')

    for command in self.commands:
        # Entries may be either command names or Command subclasses.
        if isinstance(command, type) and issubclass(command, Command):
            klass = command
        else:
            klass = self.get_command_class(command)
        # help_options use the 4-tuple form; convert before display.
        if (hasattr(klass, 'help_options') and
                isinstance(klass.help_options, list)):
            parser.set_option_table(klass.user_options +
                                    fix_help_options(klass.help_options))
        else:
            parser.set_option_table(klass.user_options)
        parser.print_help("Options for '%s' command:" % klass.__name__)
        print('')

    print(gen_usage(self.script_name))
|
| 670 |
+
|
| 671 |
+
def handle_display_options(self, option_order):
    """If there were any non-global "display-only" options
    (--help-commands or the metadata display options) on the command
    line, display the requested info and return true; else return
    false.

    'option_order' is the (option, value) list produced by
    FancyGetopt.get_option_order(), preserving command-line order.
    """
    from distutils.core import gen_usage

    # User just wants a list of commands -- we'll print it out and stop
    # processing now (ie. if they ran "setup --help-commands foo bar",
    # we ignore "foo bar").
    if self.help_commands:
        self.print_commands()
        print('')
        print(gen_usage(self.script_name))
        return 1

    # If user supplied any of the "display metadata" options, then
    # display that metadata in the order in which the user supplied the
    # metadata options.
    any_display_options = 0
    is_display_option = {}
    for option in self.display_options:
        is_display_option[option[0]] = 1

    for (opt, val) in option_order:
        if val and is_display_option.get(opt):
            # Map the long option name to the metadata accessor
            # suffix (e.g. "author-email" -> "author_email").
            opt = translate_longopt(opt)
            value = getattr(self.metadata, "get_"+opt)()
            # List-valued metadata is joined for display: keywords and
            # platforms comma-separated, PEP 314 lists one per line.
            if opt in ['keywords', 'platforms']:
                print(','.join(value))
            elif opt in ('classifiers', 'provides', 'requires',
                         'obsoletes'):
                print('\n'.join(value))
            else:
                print(value)
            any_display_options = 1

    return any_display_options
|
| 710 |
+
|
| 711 |
+
def print_command_list(self, commands, header, max_length):
    """Print a subset of the list of all commands -- used by
    'print_commands()'.

    'max_length' is the column width used to align descriptions.
    """
    print(header + ":")

    for cmd in commands:
        # Prefer an explicitly registered class; fall back to loading
        # the standard command module.
        cmd_class = self.cmdclass.get(cmd)
        if not cmd_class:
            cmd_class = self.get_command_class(cmd)
        description = getattr(cmd_class, 'description',
                              "(no description available)")

        print("  %-*s  %s" % (max_length, cmd, description))
|
| 727 |
+
|
| 728 |
+
def print_commands(self):
    """Print out a help message listing all available commands with a
    description of each.  The list is divided into "standard commands"
    (listed in distutils.command.__all__) and "extra commands"
    (mentioned in self.cmdclass, but not a standard command).  The
    descriptions come from the command class attribute
    'description'.
    """
    import distutils.command
    std_commands = distutils.command.__all__
    is_std = {}
    for cmd in std_commands:
        is_std[cmd] = 1

    # Anything registered in 'cmdclass' that isn't standard is "extra".
    extra_commands = []
    for cmd in self.cmdclass.keys():
        if not is_std.get(cmd):
            extra_commands.append(cmd)

    # Width of the widest command name, for column alignment.
    max_length = 0
    for cmd in (std_commands + extra_commands):
        if len(cmd) > max_length:
            max_length = len(cmd)

    self.print_command_list(std_commands,
                            "Standard commands",
                            max_length)
    if extra_commands:
        print()
        self.print_command_list(extra_commands,
                                "Extra commands",
                                max_length)
|
| 760 |
+
|
| 761 |
+
def get_command_list(self):
    """Get a list of (command, description) tuples.

    The list is divided into "standard commands" (listed in
    distutils.command.__all__) and "extra commands" (mentioned in
    self.cmdclass, but not a standard command).  The descriptions come
    from the command class attribute 'description'.
    """
    # Currently this is only used on Mac OS, for the Mac-only GUI
    # Distutils interface (by Jack Jansen)
    import distutils.command
    std_commands = distutils.command.__all__
    is_std = {}
    for cmd in std_commands:
        is_std[cmd] = 1

    # Registered commands that are not standard ones.
    extra_commands = []
    for cmd in self.cmdclass.keys():
        if not is_std.get(cmd):
            extra_commands.append(cmd)

    rv = []
    for cmd in (std_commands + extra_commands):
        # Prefer explicitly registered classes over module lookup.
        klass = self.cmdclass.get(cmd)
        if not klass:
            klass = self.get_command_class(cmd)
        try:
            description = klass.description
        except AttributeError:
            description = "(no description available)"
        rv.append((cmd, description))
    return rv
|
| 792 |
+
|
| 793 |
+
# -- Command class/object methods ----------------------------------
|
| 794 |
+
|
| 795 |
+
def get_command_packages(self):
    """Return a list of packages from which commands are loaded."""
    pkgs = self.command_packages
    if not isinstance(pkgs, list):
        # A None or comma-separated string is normalized into a list.
        raw = pkgs if pkgs is not None else ''
        # NB: the emptiness test runs on the *unstripped* piece, as in
        # the original -- a whitespace-only piece survives as ''.
        pkgs = [name.strip() for name in raw.split(',') if name != '']
        if "distutils.command" not in pkgs:
            pkgs.insert(0, "distutils.command")
        # Cache the normalized list so later calls skip the parse.
        self.command_packages = pkgs
    return pkgs
|
| 806 |
+
|
| 807 |
+
def get_command_class(self, command):
    """Return the class that implements the Distutils command named by
    'command'.  First we check the 'cmdclass' dictionary; if the
    command is mentioned there, we fetch the class object from the
    dictionary and return it.  Otherwise we load the command module
    ("distutils.command." + command) and fetch the command class from
    the module.  The loaded class is also stored in 'cmdclass'
    to speed future calls to 'get_command_class()'.

    Raises DistutilsModuleError if the expected module could not be
    found, or if that module does not define the expected class.
    """
    klass = self.cmdclass.get(command)
    if klass:
        return klass

    # Search each configured command package in order; the first
    # package whose module both imports and defines a class named
    # after the command wins.
    for pkgname in self.get_command_packages():
        module_name = "%s.%s" % (pkgname, command)
        klass_name = command

        try:
            __import__(module_name)
            module = sys.modules[module_name]
        except ImportError:
            # Not provided by this package -- try the next one.
            continue

        try:
            klass = getattr(module, klass_name)
        except AttributeError:
            raise DistutilsModuleError(
                "invalid command '%s' (no class '%s' in module '%s')"
                % (command, klass_name, module_name))

        # Cache the class for future lookups.
        self.cmdclass[command] = klass
        return klass

    raise DistutilsModuleError("invalid command '%s'" % command)
|
| 844 |
+
|
| 845 |
+
def get_command_obj(self, command, create=1):
    """Return the command object for 'command'.  Normally this object
    is cached on a previous call to 'get_command_obj()'; if no command
    object for 'command' is in the cache, then we either create and
    return it (if 'create' is true) or return None.
    """
    cmd_obj = self.command_obj.get(command)
    if not cmd_obj and create:
        if DEBUG:
            self.announce("Distribution.get_command_obj(): "
                          "creating '%s' command object" % command)

        klass = self.get_command_class(command)
        # Cache the new instance and mark it as not yet run.
        cmd_obj = self.command_obj[command] = klass(self)
        self.have_run[command] = 0

        # Set any options that were supplied in config files
        # or on the command line.  (NB. support for error
        # reporting is lame here: any errors aren't reported
        # until 'finalize_options()' is called, which means
        # we won't report the source of the error.)
        options = self.command_options.get(command)
        if options:
            self._set_command_options(cmd_obj, options)

    return cmd_obj
|
| 871 |
+
|
| 872 |
+
def _set_command_options(self, command_obj, option_dict=None):
    """Set the options for 'command_obj' from 'option_dict'.  Basically
    this means copying elements of a dictionary ('option_dict') to
    attributes of an instance ('command').

    'command_obj' must be a Command instance.  If 'option_dict' is not
    supplied, uses the standard option dictionary for this command
    (from 'self.command_options').

    Raises DistutilsOptionError if an option is unknown to the command
    or a boolean option's string value cannot be parsed.
    """
    command_name = command_obj.get_command_name()
    if option_dict is None:
        option_dict = self.get_option_dict(command_name)

    if DEBUG:
        self.announce("  setting options for '%s' command:" % command_name)
    # Each entry maps option -> (source, value), where 'source' records
    # where the value came from (config file name or "command line").
    for (option, (source, value)) in option_dict.items():
        if DEBUG:
            self.announce("    %s = %s (from %s)" % (option, value,
                                                     source))
        # Boolean options get string values coerced via strtobool;
        # options in 'negative_opt' set the aliased attribute to the
        # logical inverse.
        try:
            bool_opts = [translate_longopt(o)
                         for o in command_obj.boolean_options]
        except AttributeError:
            bool_opts = []
        try:
            neg_opt = command_obj.negative_opt
        except AttributeError:
            neg_opt = {}

        try:
            is_string = isinstance(value, str)
            if option in neg_opt and is_string:
                setattr(command_obj, neg_opt[option], not strtobool(value))
            elif option in bool_opts and is_string:
                setattr(command_obj, option, strtobool(value))
            elif hasattr(command_obj, option):
                setattr(command_obj, option, value)
            else:
                raise DistutilsOptionError(
                    "error in %s: command '%s' has no such option '%s'"
                    % (source, command_name, option))
        except ValueError as msg:
            # strtobool failures are reported as option errors.
            raise DistutilsOptionError(msg)
|
| 915 |
+
|
| 916 |
+
def reinitialize_command(self, command, reinit_subcommands=0):
    """Reinitializes a command to the state it was in when first
    returned by 'get_command_obj()': ie., initialized but not yet
    finalized.  This provides the opportunity to sneak option
    values in programmatically, overriding or supplementing
    user-supplied values from the config files and command line.
    You'll have to re-finalize the command object (by calling
    'finalize_options()' or 'ensure_finalized()') before using it for
    real.

    'command' should be a command name (string) or command object.  If
    'reinit_subcommands' is true, also reinitializes the command's
    sub-commands, as declared by the 'sub_commands' class attribute (if
    it has one).  See the "install" command for an example.  Only
    reinitializes the sub-commands that actually matter, ie. those
    whose test predicates return true.

    Returns the reinitialized command object.
    """
    from distutils.cmd import Command
    if not isinstance(command, Command):
        command_name = command
        command = self.get_command_obj(command_name)
    else:
        command_name = command.get_command_name()

    # Never finalized means it's already in its pristine state.
    if not command.finalized:
        return command
    command.initialize_options()
    command.finalized = 0
    self.have_run[command_name] = 0
    # Re-apply config-file/command-line options on top of the fresh
    # defaults, so only programmatic overrides remain to be made.
    self._set_command_options(command)

    if reinit_subcommands:
        for sub in command.get_sub_commands():
            self.reinitialize_command(sub, reinit_subcommands)

    return command
|
| 954 |
+
|
| 955 |
+
# -- Methods that operate on the Distribution ----------------------
|
| 956 |
+
|
| 957 |
+
def announce(self, msg, level=log.INFO):
    """Forward 'msg' to the distutils logger at the given 'level'."""
    log.log(level, msg)
|
| 959 |
+
|
| 960 |
+
def run_commands(self):
    """Run each command that was seen on the setup script command line.

    Uses the list of commands found and cache of command objects
    created by 'get_command_obj()'.
    """
    for command_name in self.commands:
        self.run_command(command_name)
|
| 967 |
+
|
| 968 |
+
# -- Methods that operate on its Commands --------------------------
|
| 969 |
+
|
| 970 |
+
def run_command(self, command):
    """Do whatever it takes to run a command (including nothing at all,
    if the command has already been run).  Specifically: if we have
    already created and run the command named by 'command', return
    silently without doing anything.  If the command named by 'command'
    doesn't even have a command object yet, create one.  Then invoke
    'run()' on that command object (or an existing one).
    """
    # Already been here, done that? then return silently.
    if self.have_run.get(command):
        return

    log.info("running %s", command)
    cmd_obj = self.get_command_obj(command)
    # ensure_finalized() makes finalize_options() run at most once.
    cmd_obj.ensure_finalized()
    cmd_obj.run()
    self.have_run[command] = 1
|
| 987 |
+
|
| 988 |
+
# -- Distribution query methods ------------------------------------
|
| 989 |
+
|
| 990 |
+
def has_pure_modules(self):
    """True if any pure-Python packages or modules were declared."""
    declared = self.packages or self.py_modules or []
    return len(declared) > 0

def has_ext_modules(self):
    """Truthy when C/C++ extension modules were declared (preserves
    the falsy pass-through of an empty/None 'ext_modules')."""
    exts = self.ext_modules
    return exts and len(exts) > 0

def has_c_libraries(self):
    """Truthy when C libraries were declared."""
    libs = self.libraries
    return libs and len(libs) > 0

def has_modules(self):
    """True if anything Python-importable is being built."""
    return self.has_pure_modules() or self.has_ext_modules()

def has_headers(self):
    """Truthy when header files were declared."""
    hdrs = self.headers
    return hdrs and len(hdrs) > 0

def has_scripts(self):
    """Truthy when scripts were declared."""
    scripts = self.scripts
    return scripts and len(scripts) > 0

def has_data_files(self):
    """Truthy when data files were declared."""
    data = self.data_files
    return data and len(data) > 0

def is_pure(self):
    """True when the distribution contains only pure-Python code
    (no extension modules, no C libraries)."""
    pure = self.has_pure_modules()
    compiled = self.has_ext_modules() or self.has_c_libraries()
    return pure and not compiled
|
| 1015 |
+
|
| 1016 |
+
# -- Metadata query methods ----------------------------------------
|
| 1017 |
+
|
| 1018 |
+
# If you're looking for 'get_name()', 'get_version()', and so forth,
|
| 1019 |
+
# they are defined in a sneaky way: the constructor binds self.get_XXX
|
| 1020 |
+
# to self.metadata.get_XXX. The actual code is in the
|
| 1021 |
+
# DistributionMetadata class, below.
|
| 1022 |
+
|
| 1023 |
+
class DistributionMetadata:
    """Dummy class to hold the distribution meta-data: name, version,
    author, and so forth.

    Fields are either populated from a PKG-INFO file (when 'path' is
    given to the constructor) or initialized to None and filled in by
    the setup script.  The get_XXX accessors substitute placeholder
    values for unset fields.
    """

    # Basenames for which Distribution generates delegating get_XXX
    # methods on itself.
    _METHOD_BASENAMES = ("name", "version", "author", "author_email",
                         "maintainer", "maintainer_email", "url",
                         "license", "description", "long_description",
                         "keywords", "platforms", "fullname", "contact",
                         "contact_email", "classifiers", "download_url",
                         # PEP 314
                         "provides", "requires", "obsoletes",
                         )

    def __init__(self, path=None):
        """Initialize all metadata fields to None, or, if 'path' is
        given, populate them from the PKG-INFO file at that path.
        """
        if path is not None:
            # Bug fix: the original passed an anonymous open() whose
            # handle was never closed (leaked until GC).  Use a context
            # manager; the default text encoding is kept unchanged.
            with open(path) as fileobj:
                self.read_pkg_file(fileobj)
        else:
            self.name = None
            self.version = None
            self.author = None
            self.author_email = None
            self.maintainer = None
            self.maintainer_email = None
            self.url = None
            self.license = None
            self.description = None
            self.long_description = None
            self.keywords = None
            self.platforms = None
            self.classifiers = None
            self.download_url = None
            # PEP 314
            self.provides = None
            self.requires = None
            self.obsoletes = None

    def read_pkg_file(self, file):
        """Reads the metadata values from a file object (PKG-INFO,
        RFC 822 header format)."""
        msg = message_from_file(file)

        def _read_field(name):
            # A header written as 'UNKNOWN' means "not given".
            value = msg[name]
            if value == 'UNKNOWN':
                return None
            return value

        def _read_list(name):
            # Multi-valued header; an empty list means "not given".
            values = msg.get_all(name, None)
            if values == []:
                return None
            return values

        metadata_version = msg['metadata-version']
        self.name = _read_field('name')
        self.version = _read_field('version')
        self.description = _read_field('summary')
        # we are filling author only.
        self.author = _read_field('author')
        self.maintainer = None
        self.author_email = _read_field('author-email')
        self.maintainer_email = None
        self.url = _read_field('home-page')
        self.license = _read_field('license')

        if 'download-url' in msg:
            self.download_url = _read_field('download-url')
        else:
            self.download_url = None

        # Note the header naming: 'description' holds the long
        # description, 'summary' the short one.
        self.long_description = _read_field('description')
        self.description = _read_field('summary')

        if 'keywords' in msg:
            self.keywords = _read_field('keywords').split(',')

        self.platforms = _read_list('platform')
        self.classifiers = _read_list('classifier')

        # PEP 314 - these fields only exist in 1.1
        if metadata_version == '1.1':
            self.requires = _read_list('requires')
            self.provides = _read_list('provides')
            self.obsoletes = _read_list('obsoletes')
        else:
            self.requires = None
            self.provides = None
            self.obsoletes = None

    def write_pkg_info(self, base_dir):
        """Write the PKG-INFO file into the release tree."""
        with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
                  encoding='UTF-8') as pkg_info:
            self.write_pkg_file(pkg_info)

    def write_pkg_file(self, file):
        """Write the PKG-INFO format data to a file object."""
        # Any PEP 314 field present bumps the metadata version to 1.1.
        version = '1.0'
        if (self.provides or self.requires or self.obsoletes or
                self.classifiers or self.download_url):
            version = '1.1'

        file.write('Metadata-Version: %s\n' % version)
        file.write('Name: %s\n' % self.get_name())
        file.write('Version: %s\n' % self.get_version())
        file.write('Summary: %s\n' % self.get_description())
        file.write('Home-page: %s\n' % self.get_url())
        file.write('Author: %s\n' % self.get_contact())
        file.write('Author-email: %s\n' % self.get_contact_email())
        file.write('License: %s\n' % self.get_license())
        if self.download_url:
            file.write('Download-URL: %s\n' % self.download_url)

        # The long description must be escaped to survive as a single
        # RFC 822 header value.
        long_desc = rfc822_escape(self.get_long_description())
        file.write('Description: %s\n' % long_desc)

        keywords = ','.join(self.get_keywords())
        if keywords:
            file.write('Keywords: %s\n' % keywords)

        self._write_list(file, 'Platform', self.get_platforms())
        self._write_list(file, 'Classifier', self.get_classifiers())

        # PEP 314
        self._write_list(file, 'Requires', self.get_requires())
        self._write_list(file, 'Provides', self.get_provides())
        self._write_list(file, 'Obsoletes', self.get_obsoletes())

    def _write_list(self, file, name, values):
        # Emit one 'Name: value' header line per element.
        for value in values:
            file.write('%s: %s\n' % (name, value))

    # -- Metadata query methods ----------------------------------------
    # Unset fields are reported as "UNKNOWN" (or an empty list / a
    # zero version), matching PKG-INFO conventions.

    def get_name(self):
        return self.name or "UNKNOWN"

    def get_version(self):
        return self.version or "0.0.0"

    def get_fullname(self):
        return "%s-%s" % (self.get_name(), self.get_version())

    def get_author(self):
        return self.author or "UNKNOWN"

    def get_author_email(self):
        return self.author_email or "UNKNOWN"

    def get_maintainer(self):
        return self.maintainer or "UNKNOWN"

    def get_maintainer_email(self):
        return self.maintainer_email or "UNKNOWN"

    def get_contact(self):
        # Maintainer takes precedence over author.
        return self.maintainer or self.author or "UNKNOWN"

    def get_contact_email(self):
        return self.maintainer_email or self.author_email or "UNKNOWN"

    def get_url(self):
        return self.url or "UNKNOWN"

    def get_license(self):
        return self.license or "UNKNOWN"
    # Historical British-spelling alias.
    get_licence = get_license

    def get_description(self):
        return self.description or "UNKNOWN"

    def get_long_description(self):
        return self.long_description or "UNKNOWN"

    def get_keywords(self):
        return self.keywords or []

    def set_keywords(self, value):
        self.keywords = _ensure_list(value, 'keywords')

    def get_platforms(self):
        return self.platforms or ["UNKNOWN"]

    def set_platforms(self, value):
        self.platforms = _ensure_list(value, 'platforms')

    def get_classifiers(self):
        return self.classifiers or []

    def set_classifiers(self, value):
        self.classifiers = _ensure_list(value, 'classifiers')

    def get_download_url(self):
        return self.download_url or "UNKNOWN"

    # PEP 314
    def get_requires(self):
        return self.requires or []

    def set_requires(self, value):
        # Validate each entry as a version predicate before storing.
        import distutils.versionpredicate
        for v in value:
            distutils.versionpredicate.VersionPredicate(v)
        self.requires = list(value)

    def get_provides(self):
        return self.provides or []

    def set_provides(self, value):
        value = [v.strip() for v in value]
        for v in value:
            import distutils.versionpredicate
            distutils.versionpredicate.split_provision(v)
        self.provides = value

    def get_obsoletes(self):
        return self.obsoletes or []

    def set_obsoletes(self, value):
        # Validate each entry as a version predicate before storing.
        import distutils.versionpredicate
        for v in value:
            distutils.versionpredicate.VersionPredicate(v)
        self.obsoletes = list(value)
|
| 1248 |
+
|
| 1249 |
+
def fix_help_options(options):
    """Convert a 4-tuple 'help_options' list as found in various command
    classes to the 3-tuple form required by FancyGetopt (the trailing
    callback element is dropped).
    """
    return [entry[0:3] for entry in options]
|
evalkit_llava/lib/python3.10/distutils/extension.py
ADDED
|
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""distutils.extension
|
| 2 |
+
|
| 3 |
+
Provides the Extension class, used to describe C/C++ extension
|
| 4 |
+
modules in setup scripts."""
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
import re
|
| 8 |
+
import warnings
|
| 9 |
+
|
| 10 |
+
# This class is really only used by the "build_ext" command, so it might
|
| 11 |
+
# make sense to put it in distutils.command.build_ext. However, that
|
| 12 |
+
# module is already big enough, and I want to make this class a bit more
|
| 13 |
+
# complex to simplify some common cases ("foo" module in "foo.c") and do
|
| 14 |
+
# better error-checking ("foo.c" actually exists).
|
| 15 |
+
#
|
| 16 |
+
# Also, putting this in build_ext.py means every setup script would have to
|
| 17 |
+
# import that large-ish module (indirectly, through distutils.core) in
|
| 18 |
+
# order to do anything.
|
| 19 |
+
|
| 20 |
+
class Extension:
    """Describe a C/C++ extension module and everything needed to build it.

    'name' is the full dotted Python name of the module (not a filename);
    'sources' is a list of source file names relative to the distribution
    root, in Unix (slash-separated) form.  All remaining constructor
    arguments are optional build parameters:

      include_dirs, library_dirs, runtime_library_dirs : [string]
          header / link-time / run-time search directories
      define_macros : [(name, value-or-None)]
          macros to define ("#define FOO" when value is None)
      undef_macros : [string]
          macros to undefine explicitly
      libraries : [string]
          library names (not paths) to link against
      extra_objects : [string]
          additional files to link with (object files, static libs, ...)
      extra_compile_args, extra_link_args : [string]
          platform/compiler-specific arguments for compiling / linking
      export_symbols : [string]
          symbols exported from a shared extension (not needed on most
          platforms for ordinary Python extensions)
      swig_opts : [string]
          extra options passed to SWIG for .i sources
      depends : [string]
          files the extension depends on
      language : string
          "c", "c++", "objc", ...; detected from source suffixes if omitted
      optional : boolean
          if true, a build failure does not abort the build -- the failing
          extension is simply not installed
    """

    # When adding arguments to this constructor, be sure to update
    # setup_keywords in core.py.
    def __init__(self, name, sources,
                 include_dirs=None,
                 define_macros=None,
                 undef_macros=None,
                 library_dirs=None,
                 libraries=None,
                 runtime_library_dirs=None,
                 extra_objects=None,
                 extra_compile_args=None,
                 extra_link_args=None,
                 export_symbols=None,
                 swig_opts = None,
                 depends=None,
                 language=None,
                 optional=None,
                 **kw  # To catch unknown keywords
                 ):
        # Validate the two mandatory arguments up front.
        if not isinstance(name, str):
            raise AssertionError("'name' must be a string")
        if not (isinstance(sources, list)
                and all(isinstance(src, str) for src in sources)):
            raise AssertionError("'sources' must be a list of strings")

        self.name = name
        self.sources = sources

        # Every list-valued option defaults to a fresh empty list so that
        # instances never share mutable state.
        for attr, supplied in (
                ('include_dirs', include_dirs),
                ('define_macros', define_macros),
                ('undef_macros', undef_macros),
                ('library_dirs', library_dirs),
                ('libraries', libraries),
                ('runtime_library_dirs', runtime_library_dirs),
                ('extra_objects', extra_objects),
                ('extra_compile_args', extra_compile_args),
                ('extra_link_args', extra_link_args),
                ('export_symbols', export_symbols),
                ('swig_opts', swig_opts),
                ('depends', depends),
        ):
            setattr(self, attr, supplied or [])

        self.language = language
        self.optional = optional

        # Unknown keyword arguments only produce a warning, not an error.
        if kw:
            unknown = ', '.join(sorted(repr(key) for key in kw))
            warnings.warn("Unknown Extension options: %s" % unknown)

    def __repr__(self):
        cls = self.__class__
        return '<%s.%s(%r) at %#x>' % (
            cls.__module__, cls.__qualname__, self.name, id(self))
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def read_setup_file(filename):
    """Reads a Setup file and returns Extension instances.

    A first pass gathers makefile-style "VAR = VALUE" assignments; a
    second pass parses extension lines of the form:
        <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
    expanding variables before splitting each line into words.
    """
    from distutils.sysconfig import (parse_makefile, expand_makefile_vars,
                                     _variable_rx)

    from distutils.text_file import TextFile
    from distutils.util import split_quoted

    # First pass over the file to gather "VAR = VALUE" assignments.
    vars = parse_makefile(filename)

    # Second pass to gobble up the real content: lines of the form
    #   <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
    file = TextFile(filename,
                    strip_comments=1, skip_blanks=1, join_lines=1,
                    lstrip_ws=1, rstrip_ws=1)
    try:
        extensions = []

        while True:
            line = file.readline()
            if line is None:                # eof
                break
            if re.match(_variable_rx, line): # VAR=VALUE, handled in first pass
                continue

            if line[0] == line[-1] == "*":
                file.warn("'%s' lines not handled yet" % line)
                continue

            line = expand_makefile_vars(line, vars)
            words = split_quoted(line)

            # NB. this parses a slightly different syntax than the old
            # makesetup script: here, there must be exactly one extension per
            # line, and it must be the first word of the line.  I have no idea
            # why the old syntax supported multiple extensions per line, as
            # they all wind up being the same.

            module = words[0]
            ext = Extension(module, [])
            append_next_word = None

            for word in words[1:]:
                if append_next_word is not None:
                    append_next_word.append(word)
                    append_next_word = None
                    continue

                suffix = os.path.splitext(word)[1]
                switch = word[0:2] ; value = word[2:]

                if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
                    # hmm, should we do something about C vs. C++ sources?
                    # or leave it up to the CCompiler implementation to
                    # worry about?
                    ext.sources.append(word)
                elif switch == "-I":
                    ext.include_dirs.append(value)
                elif switch == "-D":
                    equals = value.find("=")
                    if equals == -1:        # bare "-DFOO" -- no value
                        ext.define_macros.append((value, None))
                    else:                   # "-DFOO=blah"
                        # BUGFIX: was value[equals+2:], which silently
                        # dropped the first character of the macro value
                        # ("-DFOO=blah" yielded "lah" instead of "blah").
                        ext.define_macros.append((value[0:equals],
                                                  value[equals+1:]))
                elif switch == "-U":
                    ext.undef_macros.append(value)
                elif switch == "-C":        # only here 'cause makesetup has it!
                    ext.extra_compile_args.append(word)
                elif switch == "-l":
                    ext.libraries.append(value)
                elif switch == "-L":
                    ext.library_dirs.append(value)
                elif switch == "-R":
                    ext.runtime_library_dirs.append(value)
                elif word == "-rpath":
                    append_next_word = ext.runtime_library_dirs
                elif word == "-Xlinker":
                    append_next_word = ext.extra_link_args
                elif word == "-Xcompiler":
                    append_next_word = ext.extra_compile_args
                elif switch == "-u":
                    ext.extra_link_args.append(word)
                    if not value:
                        # "-u SYM" form: the symbol follows as the next word.
                        append_next_word = ext.extra_link_args
                elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
                    # NB. a really faithful emulation of makesetup would
                    # append a .o file to extra_objects only if it
                    # had a slash in it; otherwise, it would s/.o/.c/
                    # and append it to sources.  Hmmmm.
                    ext.extra_objects.append(word)
                else:
                    file.warn("unrecognized argument '%s'" % word)

            extensions.append(ext)
    finally:
        file.close()

    return extensions
|
evalkit_llava/lib/python3.10/distutils/fancy_getopt.py
ADDED
|
@@ -0,0 +1,457 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""distutils.fancy_getopt
|
| 2 |
+
|
| 3 |
+
Wrapper around the standard getopt module that provides the following
|
| 4 |
+
additional features:
|
| 5 |
+
* short and long options are tied together
|
| 6 |
+
* options have help strings, so fancy_getopt could potentially
|
| 7 |
+
create a complete usage summary
|
| 8 |
+
* options set attributes of a passed-in object
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import sys, string, re
|
| 12 |
+
import getopt
|
| 13 |
+
from distutils.errors import *
|
| 14 |
+
|
| 15 |
+
# Much like command_re in distutils.core, this is close to but not quite
|
| 16 |
+
# the same as a Python NAME -- except, in the spirit of most GNU
|
| 17 |
+
# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
|
| 18 |
+
# The similarities to NAME are again not a coincidence...
|
| 19 |
+
longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
|
| 20 |
+
longopt_re = re.compile(r'^%s$' % longopt_pat)
|
| 21 |
+
|
| 22 |
+
# For recognizing "negative alias" options, eg. "quiet=!verbose"
|
| 23 |
+
neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
|
| 24 |
+
|
| 25 |
+
# This is used to translate long options to legitimate Python identifiers
|
| 26 |
+
# (for use as attributes of some object).
|
| 27 |
+
longopt_xlate = str.maketrans('-', '_')
|
| 28 |
+
|
| 29 |
+
class FancyGetopt:
    """Wrapper around the standard 'getopt()' module that provides some
    handy extra functionality:
      * short and long options are tied together
      * options have help strings, and help text can be assembled
        from them
      * options set attributes of a passed-in object
      * boolean options can have "negative aliases" -- eg. if
        --quiet is the "negative alias" of --verbose, then "--quiet"
        on the command line sets 'verbose' to false
    """

    def __init__(self, option_table=None):
        # The option table is (currently) a list of tuples.  The
        # tuples may have 3 or four values:
        #   (long_option, short_option, help_string [, repeatable])
        # if an option takes an argument, its long_option should have '='
        # appended; short_option should just be a single character, no ':'
        # in any case.  If a long_option doesn't have a corresponding
        # short_option, short_option should be None.  All option tuples
        # must have long options.
        self.option_table = option_table

        # 'option_index' maps long option names to entries in the option
        # table (ie. those 3-tuples).
        self.option_index = {}
        if self.option_table:
            self._build_index()

        # 'alias' records (duh) alias options; {'foo': 'bar'} means
        # --foo is an alias for --bar
        self.alias = {}

        # 'negative_alias' keeps track of options that are the boolean
        # opposite of some other option
        self.negative_alias = {}

        # These keep track of the information in the option table.  We
        # don't actually populate these structures until we're ready to
        # parse the command-line, since the 'option_table' passed in here
        # isn't necessarily the final word.
        self.short_opts = []
        self.long_opts = []
        self.short2long = {}
        self.attr_name = {}
        self.takes_arg = {}

        # And 'option_order' is filled up in 'getopt()'; it records the
        # original order of options (and their values) on the command-line,
        # but expands short options, converts aliases, etc.
        self.option_order = []

    def _build_index(self):
        """Rebuild 'option_index' from scratch out of 'option_table'."""
        self.option_index.clear()
        for option in self.option_table:
            self.option_index[option[0]] = option

    def set_option_table(self, option_table):
        """Replace the option table and rebuild the index over it."""
        self.option_table = option_table
        self._build_index()

    def add_option(self, long_option, short_option=None, help_string=None):
        """Append one option tuple, rejecting duplicate long names."""
        if long_option in self.option_index:
            raise DistutilsGetoptError(
                "option conflict: already an option '%s'" % long_option)
        else:
            option = (long_option, short_option, help_string)
            self.option_table.append(option)
            self.option_index[long_option] = option

    def has_option(self, long_option):
        """Return true if the option table for this parser has an
        option with long name 'long_option'."""
        return long_option in self.option_index

    def get_attr_name(self, long_option):
        """Translate long option name 'long_option' to the form it
        has as an attribute of some object: ie., translate hyphens
        to underscores."""
        return long_option.translate(longopt_xlate)

    def _check_alias_dict(self, aliases, what):
        # Every key and value of an alias dict must name a defined option.
        assert isinstance(aliases, dict)
        for (alias, opt) in aliases.items():
            if alias not in self.option_index:
                raise DistutilsGetoptError(("invalid %s '%s': "
                    "option '%s' not defined") % (what, alias, alias))
            if opt not in self.option_index:
                raise DistutilsGetoptError(("invalid %s '%s': "
                    "aliased option '%s' not defined") % (what, alias, opt))

    def set_aliases(self, alias):
        """Set the aliases for this option parser."""
        self._check_alias_dict(alias, "alias")
        self.alias = alias

    def set_negative_aliases(self, negative_alias):
        """Set the negative aliases for this option parser.
        'negative_alias' should be a dictionary mapping option names to
        option names, both the key and value must already be defined
        in the option table."""
        self._check_alias_dict(negative_alias, "negative alias")
        self.negative_alias = negative_alias

    def _grok_option_table(self):
        """Populate the various data structures that keep tabs on the
        option table.  Called by 'getopt()' before it can do anything
        worthwhile.
        """
        self.long_opts = []
        self.short_opts = []
        self.short2long.clear()
        # 'repeat' maps long option name -> repeatable flag (4th tuple item).
        self.repeat = {}

        for option in self.option_table:
            if len(option) == 3:
                long, short, help = option
                repeat = 0
            elif len(option) == 4:
                long, short, help, repeat = option
            else:
                # the option table is part of the code, so simply
                # assert that it is correct
                raise ValueError("invalid option tuple: %r" % (option,))

            # Type- and value-check the option names
            if not isinstance(long, str) or len(long) < 2:
                raise DistutilsGetoptError(("invalid long option '%s': "
                    "must be a string of length >= 2") % long)

            if (not ((short is None) or
                     (isinstance(short, str) and len(short) == 1))):
                raise DistutilsGetoptError("invalid short option '%s': "
                    "must a single character or None" % short)

            self.repeat[long] = repeat
            self.long_opts.append(long)

            if long[-1] == '=':             # option takes an argument?
                # getopt's convention: trailing ':' on short, '=' on long.
                if short: short = short + ':'
                long = long[0:-1]
                self.takes_arg[long] = 1
            else:
                # Is option is a "negative alias" for some other option (eg.
                # "quiet" == "!verbose")?
                alias_to = self.negative_alias.get(long)
                if alias_to is not None:
                    if self.takes_arg[alias_to]:
                        raise DistutilsGetoptError(
                            "invalid negative alias '%s': "
                            "aliased option '%s' takes a value"
                            % (long, alias_to))

                    self.long_opts[-1] = long # XXX redundant?!

                self.takes_arg[long] = 0

            # If this is an alias option, make sure its "takes arg" flag is
            # the same as the option it's aliased to.
            alias_to = self.alias.get(long)
            if alias_to is not None:
                if self.takes_arg[long] != self.takes_arg[alias_to]:
                    raise DistutilsGetoptError(
                        "invalid alias '%s': inconsistent with "
                        "aliased option '%s' (one of them takes a value, "
                        "the other doesn't"
                        % (long, alias_to))

            # Now enforce some bondage on the long option name, so we can
            # later translate it to an attribute name on some object.  Have
            # to do this a bit late to make sure we've removed any trailing
            # '='.
            if not longopt_re.match(long):
                raise DistutilsGetoptError(
                    "invalid long option name '%s' "
                    "(must be letters, numbers, hyphens only" % long)

            self.attr_name[long] = self.get_attr_name(long)
            if short:
                self.short_opts.append(short)
                self.short2long[short[0]] = long

    def getopt(self, args=None, object=None):
        """Parse command-line options in args. Store as attributes on object.

        If 'args' is None or not supplied, uses 'sys.argv[1:]'.  If
        'object' is None or not supplied, creates a new OptionDummy
        object, stores option values there, and returns a tuple (args,
        object).  If 'object' is supplied, it is modified in place and
        'getopt()' just returns 'args'; in both cases, the returned
        'args' is a modified copy of the passed-in 'args' list, which
        is left untouched.
        """
        if args is None:
            args = sys.argv[1:]
        if object is None:
            object = OptionDummy()
            created_object = True
        else:
            created_object = False

        self._grok_option_table()

        short_opts = ' '.join(self.short_opts)
        try:
            opts, args = getopt.getopt(args, short_opts, self.long_opts)
        except getopt.error as msg:
            raise DistutilsArgError(msg)

        for opt, val in opts:
            if len(opt) == 2 and opt[0] == '-': # it's a short option
                opt = self.short2long[opt[1]]
            else:
                assert len(opt) > 2 and opt[:2] == '--'
                opt = opt[2:]

            alias = self.alias.get(opt)
            if alias:
                opt = alias

            if not self.takes_arg[opt]:     # boolean option?
                assert val == '', "boolean option can't have value"
                alias = self.negative_alias.get(opt)
                if alias:
                    opt = alias
                    val = 0
                else:
                    val = 1

            attr = self.attr_name[opt]
            # The only repeating option at the moment is 'verbose'.
            # It has a negative option -q quiet, which should set verbose = 0.
            # NOTE(review): 'self.repeat' is keyed by the long option name,
            # but is looked up here with 'attr' (hyphens translated to
            # underscores); the two only match for names without hyphens,
            # such as 'verbose' -- confirm whether this is intentional.
            if val and self.repeat.get(attr) is not None:
                val = getattr(object, attr, 0) + 1
            setattr(object, attr, val)
            self.option_order.append((opt, val))

        # for opts
        if created_object:
            return args, object
        else:
            return args

    def get_option_order(self):
        """Returns the list of (option, value) tuples processed by the
        previous run of 'getopt()'.  Raises RuntimeError if
        'getopt()' hasn't been called yet.
        """
        if self.option_order is None:
            raise RuntimeError("'getopt()' hasn't been called yet")
        else:
            return self.option_order

    def generate_help(self, header=None):
        """Generate help text (a list of strings, one per suggested line of
        output) from the option table for this FancyGetopt object.
        """
        # Blithely assume the option table is good: probably wouldn't call
        # 'generate_help()' unless you've already called 'getopt()'.

        # First pass: determine maximum length of long option names
        max_opt = 0
        for option in self.option_table:
            long = option[0]
            short = option[1]
            l = len(long)
            if long[-1] == '=':
                l = l - 1
            if short is not None:
                l = l + 5                   # " (-x)" where short == 'x'
            if l > max_opt:
                max_opt = l

        opt_width = max_opt + 2 + 2 + 2     # room for indent + dashes + gutter

        # Typical help block looks like this:
        #   --foo       controls foonabulation
        # Help block for longest option looks like this:
        #   --flimflam  set the flim-flam level
        # and with wrapped text:
        #   --flimflam  set the flim-flam level (must be between
        #               0 and 100, except on Tuesdays)
        # Options with short names will have the short name shown (but
        # it doesn't contribute to max_opt):
        #   --foo (-f)  controls foonabulation
        # If adding the short option would make the left column too wide,
        # we push the explanation off to the next line
        #   --flimflam (-l)
        #               set the flim-flam level
        # Important parameters:
        #   - 2 spaces before option block start lines
        #   - 2 dashes for each long option name
        #   - min. 2 spaces between option and explanation (gutter)
        #   - 5 characters (incl. space) for short option name

        # Now generate lines of help text.  (If 80 columns were good enough
        # for Jesus, then 78 columns are good enough for me!)
        line_width = 78
        text_width = line_width - opt_width
        big_indent = ' ' * opt_width
        if header:
            lines = [header]
        else:
            lines = ['Option summary:']

        for option in self.option_table:
            long, short, help = option[:3]
            text = wrap_text(help, text_width)
            if long[-1] == '=':
                long = long[0:-1]

            # Case 1: no short option at all (makes life easy)
            if short is None:
                if text:
                    lines.append("  --%-*s  %s" % (max_opt, long, text[0]))
                else:
                    lines.append("  --%-*s  " % (max_opt, long))

            # Case 2: we have a short option, so we have to include it
            # just after the long option
            else:
                opt_names = "%s (-%s)" % (long, short)
                if text:
                    lines.append("  --%-*s  %s" %
                                 (max_opt, opt_names, text[0]))
                else:
                    # NOTE(review): '%-*s' consumes two arguments
                    # (width, string); formatting with a lone 'opt_names'
                    # would raise TypeError if this branch were reached
                    # (short option with no help text) -- confirm.
                    lines.append("  --%-*s" % opt_names)

            for l in text[1:]:
                lines.append(big_indent + l)
        return lines

    def print_help(self, header=None, file=None):
        """Write 'generate_help(header)' lines to 'file' (default stdout)."""
        if file is None:
            file = sys.stdout
        for line in self.generate_help(header):
            file.write(line + "\n")
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
def fancy_getopt(options, negative_opt, object, args):
    """Convenience wrapper: build a FancyGetopt parser from 'options',
    register 'negative_opt' as its negative aliases, and parse 'args'
    into attributes of 'object'.
    """
    getopt_parser = FancyGetopt(options)
    getopt_parser.set_negative_aliases(negative_opt)
    return getopt_parser.getopt(args, object)
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
# Translation table used by wrap_text(): maps every whitespace character
# (tab, newline, etc.) to a single ASCII space.
WS_TRANS = {ord(_wschar) : ' ' for _wschar in string.whitespace}
|
| 374 |
+
|
| 375 |
+
def wrap_text(text, width):
|
| 376 |
+
"""wrap_text(text : string, width : int) -> [string]
|
| 377 |
+
|
| 378 |
+
Split 'text' into multiple lines of no more than 'width' characters
|
| 379 |
+
each, and return the list of strings that results.
|
| 380 |
+
"""
|
| 381 |
+
if text is None:
|
| 382 |
+
return []
|
| 383 |
+
if len(text) <= width:
|
| 384 |
+
return [text]
|
| 385 |
+
|
| 386 |
+
text = text.expandtabs()
|
| 387 |
+
text = text.translate(WS_TRANS)
|
| 388 |
+
chunks = re.split(r'( +|-+)', text)
|
| 389 |
+
chunks = [ch for ch in chunks if ch] # ' - ' results in empty strings
|
| 390 |
+
lines = []
|
| 391 |
+
|
| 392 |
+
while chunks:
|
| 393 |
+
cur_line = [] # list of chunks (to-be-joined)
|
| 394 |
+
cur_len = 0 # length of current line
|
| 395 |
+
|
| 396 |
+
while chunks:
|
| 397 |
+
l = len(chunks[0])
|
| 398 |
+
if cur_len + l <= width: # can squeeze (at least) this chunk in
|
| 399 |
+
cur_line.append(chunks[0])
|
| 400 |
+
del chunks[0]
|
| 401 |
+
cur_len = cur_len + l
|
| 402 |
+
else: # this line is full
|
| 403 |
+
# drop last chunk if all space
|
| 404 |
+
if cur_line and cur_line[-1][0] == ' ':
|
| 405 |
+
del cur_line[-1]
|
| 406 |
+
break
|
| 407 |
+
|
| 408 |
+
if chunks: # any chunks left to process?
|
| 409 |
+
# if the current line is still empty, then we had a single
|
| 410 |
+
# chunk that's too big too fit on a line -- so we break
|
| 411 |
+
# down and break it up at the line width
|
| 412 |
+
if cur_len == 0:
|
| 413 |
+
cur_line.append(chunks[0][0:width])
|
| 414 |
+
chunks[0] = chunks[0][width:]
|
| 415 |
+
|
| 416 |
+
# all-whitespace chunks at the end of a line can be discarded
|
| 417 |
+
# (and we know from the re.split above that if a chunk has
|
| 418 |
+
# *any* whitespace, it is *all* whitespace)
|
| 419 |
+
if chunks[0][0] == ' ':
|
| 420 |
+
del chunks[0]
|
| 421 |
+
|
| 422 |
+
# and store this line in the list-of-all-lines -- as a single
|
| 423 |
+
# string, of course!
|
| 424 |
+
lines.append(''.join(cur_line))
|
| 425 |
+
|
| 426 |
+
return lines
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
def translate_longopt(opt):
|
| 430 |
+
"""Convert a long option name to a valid Python identifier by
|
| 431 |
+
changing "-" to "_".
|
| 432 |
+
"""
|
| 433 |
+
return opt.translate(longopt_xlate)
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
class OptionDummy:
|
| 437 |
+
"""Dummy class just used as a place to hold command-line option
|
| 438 |
+
values as instance attributes."""
|
| 439 |
+
|
| 440 |
+
def __init__(self, options=[]):
|
| 441 |
+
"""Create a new OptionDummy instance. The attributes listed in
|
| 442 |
+
'options' will be initialized to None."""
|
| 443 |
+
for opt in options:
|
| 444 |
+
setattr(self, opt, None)
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
if __name__ == "__main__":
|
| 448 |
+
text = """\
|
| 449 |
+
Tra-la-la, supercalifragilisticexpialidocious.
|
| 450 |
+
How *do* you spell that odd word, anyways?
|
| 451 |
+
(Someone ask Mary -- she'll know [or she'll
|
| 452 |
+
say, "How should I know?"].)"""
|
| 453 |
+
|
| 454 |
+
for w in (10, 20, 30, 40):
|
| 455 |
+
print("width: %d" % w)
|
| 456 |
+
print("\n".join(wrap_text(text, w)))
|
| 457 |
+
print()
|
evalkit_llava/lib/python3.10/distutils/file_util.py
ADDED
|
@@ -0,0 +1,238 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""distutils.file_util
|
| 2 |
+
|
| 3 |
+
Utility functions for operating on single files.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
from distutils.errors import DistutilsFileError
|
| 8 |
+
from distutils import log
|
| 9 |
+
|
| 10 |
+
# for generating verbose output in 'copy_file()'
|
| 11 |
+
_copy_action = { None: 'copying',
|
| 12 |
+
'hard': 'hard linking',
|
| 13 |
+
'sym': 'symbolically linking' }
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def _copy_file_contents(src, dst, buffer_size=16*1024):
|
| 17 |
+
"""Copy the file 'src' to 'dst'; both must be filenames. Any error
|
| 18 |
+
opening either file, reading from 'src', or writing to 'dst', raises
|
| 19 |
+
DistutilsFileError. Data is read/written in chunks of 'buffer_size'
|
| 20 |
+
bytes (default 16k). No attempt is made to handle anything apart from
|
| 21 |
+
regular files.
|
| 22 |
+
"""
|
| 23 |
+
# Stolen from shutil module in the standard library, but with
|
| 24 |
+
# custom error-handling added.
|
| 25 |
+
fsrc = None
|
| 26 |
+
fdst = None
|
| 27 |
+
try:
|
| 28 |
+
try:
|
| 29 |
+
fsrc = open(src, 'rb')
|
| 30 |
+
except OSError as e:
|
| 31 |
+
raise DistutilsFileError("could not open '%s': %s" % (src, e.strerror))
|
| 32 |
+
|
| 33 |
+
if os.path.exists(dst):
|
| 34 |
+
try:
|
| 35 |
+
os.unlink(dst)
|
| 36 |
+
except OSError as e:
|
| 37 |
+
raise DistutilsFileError(
|
| 38 |
+
"could not delete '%s': %s" % (dst, e.strerror))
|
| 39 |
+
|
| 40 |
+
try:
|
| 41 |
+
fdst = open(dst, 'wb')
|
| 42 |
+
except OSError as e:
|
| 43 |
+
raise DistutilsFileError(
|
| 44 |
+
"could not create '%s': %s" % (dst, e.strerror))
|
| 45 |
+
|
| 46 |
+
while True:
|
| 47 |
+
try:
|
| 48 |
+
buf = fsrc.read(buffer_size)
|
| 49 |
+
except OSError as e:
|
| 50 |
+
raise DistutilsFileError(
|
| 51 |
+
"could not read from '%s': %s" % (src, e.strerror))
|
| 52 |
+
|
| 53 |
+
if not buf:
|
| 54 |
+
break
|
| 55 |
+
|
| 56 |
+
try:
|
| 57 |
+
fdst.write(buf)
|
| 58 |
+
except OSError as e:
|
| 59 |
+
raise DistutilsFileError(
|
| 60 |
+
"could not write to '%s': %s" % (dst, e.strerror))
|
| 61 |
+
finally:
|
| 62 |
+
if fdst:
|
| 63 |
+
fdst.close()
|
| 64 |
+
if fsrc:
|
| 65 |
+
fsrc.close()
|
| 66 |
+
|
| 67 |
+
def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0,
|
| 68 |
+
link=None, verbose=1, dry_run=0):
|
| 69 |
+
"""Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is
|
| 70 |
+
copied there with the same name; otherwise, it must be a filename. (If
|
| 71 |
+
the file exists, it will be ruthlessly clobbered.) If 'preserve_mode'
|
| 72 |
+
is true (the default), the file's mode (type and permission bits, or
|
| 73 |
+
whatever is analogous on the current platform) is copied. If
|
| 74 |
+
'preserve_times' is true (the default), the last-modified and
|
| 75 |
+
last-access times are copied as well. If 'update' is true, 'src' will
|
| 76 |
+
only be copied if 'dst' does not exist, or if 'dst' does exist but is
|
| 77 |
+
older than 'src'.
|
| 78 |
+
|
| 79 |
+
'link' allows you to make hard links (os.link) or symbolic links
|
| 80 |
+
(os.symlink) instead of copying: set it to "hard" or "sym"; if it is
|
| 81 |
+
None (the default), files are copied. Don't set 'link' on systems that
|
| 82 |
+
don't support it: 'copy_file()' doesn't check if hard or symbolic
|
| 83 |
+
linking is available. If hardlink fails, falls back to
|
| 84 |
+
_copy_file_contents().
|
| 85 |
+
|
| 86 |
+
Under Mac OS, uses the native file copy function in macostools; on
|
| 87 |
+
other systems, uses '_copy_file_contents()' to copy file contents.
|
| 88 |
+
|
| 89 |
+
Return a tuple (dest_name, copied): 'dest_name' is the actual name of
|
| 90 |
+
the output file, and 'copied' is true if the file was copied (or would
|
| 91 |
+
have been copied, if 'dry_run' true).
|
| 92 |
+
"""
|
| 93 |
+
# XXX if the destination file already exists, we clobber it if
|
| 94 |
+
# copying, but blow up if linking. Hmmm. And I don't know what
|
| 95 |
+
# macostools.copyfile() does. Should definitely be consistent, and
|
| 96 |
+
# should probably blow up if destination exists and we would be
|
| 97 |
+
# changing it (ie. it's not already a hard/soft link to src OR
|
| 98 |
+
# (not update) and (src newer than dst).
|
| 99 |
+
|
| 100 |
+
from distutils.dep_util import newer
|
| 101 |
+
from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
|
| 102 |
+
|
| 103 |
+
if not os.path.isfile(src):
|
| 104 |
+
raise DistutilsFileError(
|
| 105 |
+
"can't copy '%s': doesn't exist or not a regular file" % src)
|
| 106 |
+
|
| 107 |
+
if os.path.isdir(dst):
|
| 108 |
+
dir = dst
|
| 109 |
+
dst = os.path.join(dst, os.path.basename(src))
|
| 110 |
+
else:
|
| 111 |
+
dir = os.path.dirname(dst)
|
| 112 |
+
|
| 113 |
+
if update and not newer(src, dst):
|
| 114 |
+
if verbose >= 1:
|
| 115 |
+
log.debug("not copying %s (output up-to-date)", src)
|
| 116 |
+
return (dst, 0)
|
| 117 |
+
|
| 118 |
+
try:
|
| 119 |
+
action = _copy_action[link]
|
| 120 |
+
except KeyError:
|
| 121 |
+
raise ValueError("invalid value '%s' for 'link' argument" % link)
|
| 122 |
+
|
| 123 |
+
if verbose >= 1:
|
| 124 |
+
if os.path.basename(dst) == os.path.basename(src):
|
| 125 |
+
log.info("%s %s -> %s", action, src, dir)
|
| 126 |
+
else:
|
| 127 |
+
log.info("%s %s -> %s", action, src, dst)
|
| 128 |
+
|
| 129 |
+
if dry_run:
|
| 130 |
+
return (dst, 1)
|
| 131 |
+
|
| 132 |
+
# If linking (hard or symbolic), use the appropriate system call
|
| 133 |
+
# (Unix only, of course, but that's the caller's responsibility)
|
| 134 |
+
elif link == 'hard':
|
| 135 |
+
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
|
| 136 |
+
try:
|
| 137 |
+
os.link(src, dst)
|
| 138 |
+
return (dst, 1)
|
| 139 |
+
except OSError:
|
| 140 |
+
# If hard linking fails, fall back on copying file
|
| 141 |
+
# (some special filesystems don't support hard linking
|
| 142 |
+
# even under Unix, see issue #8876).
|
| 143 |
+
pass
|
| 144 |
+
elif link == 'sym':
|
| 145 |
+
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
|
| 146 |
+
os.symlink(src, dst)
|
| 147 |
+
return (dst, 1)
|
| 148 |
+
|
| 149 |
+
# Otherwise (non-Mac, not linking), copy the file contents and
|
| 150 |
+
# (optionally) copy the times and mode.
|
| 151 |
+
_copy_file_contents(src, dst)
|
| 152 |
+
if preserve_mode or preserve_times:
|
| 153 |
+
st = os.stat(src)
|
| 154 |
+
|
| 155 |
+
# According to David Ascher <da@ski.org>, utime() should be done
|
| 156 |
+
# before chmod() (at least under NT).
|
| 157 |
+
if preserve_times:
|
| 158 |
+
os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
|
| 159 |
+
if preserve_mode:
|
| 160 |
+
os.chmod(dst, S_IMODE(st[ST_MODE]))
|
| 161 |
+
|
| 162 |
+
return (dst, 1)
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
# XXX I suspect this is Unix-specific -- need porting help!
|
| 166 |
+
def move_file (src, dst,
|
| 167 |
+
verbose=1,
|
| 168 |
+
dry_run=0):
|
| 169 |
+
|
| 170 |
+
"""Move a file 'src' to 'dst'. If 'dst' is a directory, the file will
|
| 171 |
+
be moved into it with the same name; otherwise, 'src' is just renamed
|
| 172 |
+
to 'dst'. Return the new full name of the file.
|
| 173 |
+
|
| 174 |
+
Handles cross-device moves on Unix using 'copy_file()'. What about
|
| 175 |
+
other systems???
|
| 176 |
+
"""
|
| 177 |
+
from os.path import exists, isfile, isdir, basename, dirname
|
| 178 |
+
import errno
|
| 179 |
+
|
| 180 |
+
if verbose >= 1:
|
| 181 |
+
log.info("moving %s -> %s", src, dst)
|
| 182 |
+
|
| 183 |
+
if dry_run:
|
| 184 |
+
return dst
|
| 185 |
+
|
| 186 |
+
if not isfile(src):
|
| 187 |
+
raise DistutilsFileError("can't move '%s': not a regular file" % src)
|
| 188 |
+
|
| 189 |
+
if isdir(dst):
|
| 190 |
+
dst = os.path.join(dst, basename(src))
|
| 191 |
+
elif exists(dst):
|
| 192 |
+
raise DistutilsFileError(
|
| 193 |
+
"can't move '%s': destination '%s' already exists" %
|
| 194 |
+
(src, dst))
|
| 195 |
+
|
| 196 |
+
if not isdir(dirname(dst)):
|
| 197 |
+
raise DistutilsFileError(
|
| 198 |
+
"can't move '%s': destination '%s' not a valid path" %
|
| 199 |
+
(src, dst))
|
| 200 |
+
|
| 201 |
+
copy_it = False
|
| 202 |
+
try:
|
| 203 |
+
os.rename(src, dst)
|
| 204 |
+
except OSError as e:
|
| 205 |
+
(num, msg) = e.args
|
| 206 |
+
if num == errno.EXDEV:
|
| 207 |
+
copy_it = True
|
| 208 |
+
else:
|
| 209 |
+
raise DistutilsFileError(
|
| 210 |
+
"couldn't move '%s' to '%s': %s" % (src, dst, msg))
|
| 211 |
+
|
| 212 |
+
if copy_it:
|
| 213 |
+
copy_file(src, dst, verbose=verbose)
|
| 214 |
+
try:
|
| 215 |
+
os.unlink(src)
|
| 216 |
+
except OSError as e:
|
| 217 |
+
(num, msg) = e.args
|
| 218 |
+
try:
|
| 219 |
+
os.unlink(dst)
|
| 220 |
+
except OSError:
|
| 221 |
+
pass
|
| 222 |
+
raise DistutilsFileError(
|
| 223 |
+
"couldn't move '%s' to '%s' by copy/delete: "
|
| 224 |
+
"delete '%s' failed: %s"
|
| 225 |
+
% (src, dst, src, msg))
|
| 226 |
+
return dst
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def write_file (filename, contents):
|
| 230 |
+
"""Create a file with the specified name and write 'contents' (a
|
| 231 |
+
sequence of strings without line terminators) to it.
|
| 232 |
+
"""
|
| 233 |
+
f = open(filename, "w")
|
| 234 |
+
try:
|
| 235 |
+
for line in contents:
|
| 236 |
+
f.write(line + "\n")
|
| 237 |
+
finally:
|
| 238 |
+
f.close()
|
evalkit_llava/lib/python3.10/distutils/filelist.py
ADDED
|
@@ -0,0 +1,327 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""distutils.filelist
|
| 2 |
+
|
| 3 |
+
Provides the FileList class, used for poking about the filesystem
|
| 4 |
+
and building lists of files.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import os, re
|
| 8 |
+
import fnmatch
|
| 9 |
+
import functools
|
| 10 |
+
from distutils.util import convert_path
|
| 11 |
+
from distutils.errors import DistutilsTemplateError, DistutilsInternalError
|
| 12 |
+
from distutils import log
|
| 13 |
+
|
| 14 |
+
class FileList:
|
| 15 |
+
"""A list of files built by on exploring the filesystem and filtered by
|
| 16 |
+
applying various patterns to what we find there.
|
| 17 |
+
|
| 18 |
+
Instance attributes:
|
| 19 |
+
dir
|
| 20 |
+
directory from which files will be taken -- only used if
|
| 21 |
+
'allfiles' not supplied to constructor
|
| 22 |
+
files
|
| 23 |
+
list of filenames currently being built/filtered/manipulated
|
| 24 |
+
allfiles
|
| 25 |
+
complete list of files under consideration (ie. without any
|
| 26 |
+
filtering applied)
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
def __init__(self, warn=None, debug_print=None):
|
| 30 |
+
# ignore argument to FileList, but keep them for backwards
|
| 31 |
+
# compatibility
|
| 32 |
+
self.allfiles = None
|
| 33 |
+
self.files = []
|
| 34 |
+
|
| 35 |
+
def set_allfiles(self, allfiles):
|
| 36 |
+
self.allfiles = allfiles
|
| 37 |
+
|
| 38 |
+
def findall(self, dir=os.curdir):
|
| 39 |
+
self.allfiles = findall(dir)
|
| 40 |
+
|
| 41 |
+
def debug_print(self, msg):
|
| 42 |
+
"""Print 'msg' to stdout if the global DEBUG (taken from the
|
| 43 |
+
DISTUTILS_DEBUG environment variable) flag is true.
|
| 44 |
+
"""
|
| 45 |
+
from distutils.debug import DEBUG
|
| 46 |
+
if DEBUG:
|
| 47 |
+
print(msg)
|
| 48 |
+
|
| 49 |
+
# -- List-like methods ---------------------------------------------
|
| 50 |
+
|
| 51 |
+
def append(self, item):
|
| 52 |
+
self.files.append(item)
|
| 53 |
+
|
| 54 |
+
def extend(self, items):
|
| 55 |
+
self.files.extend(items)
|
| 56 |
+
|
| 57 |
+
def sort(self):
|
| 58 |
+
# Not a strict lexical sort!
|
| 59 |
+
sortable_files = sorted(map(os.path.split, self.files))
|
| 60 |
+
self.files = []
|
| 61 |
+
for sort_tuple in sortable_files:
|
| 62 |
+
self.files.append(os.path.join(*sort_tuple))
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# -- Other miscellaneous utility methods ---------------------------
|
| 66 |
+
|
| 67 |
+
def remove_duplicates(self):
|
| 68 |
+
# Assumes list has been sorted!
|
| 69 |
+
for i in range(len(self.files) - 1, 0, -1):
|
| 70 |
+
if self.files[i] == self.files[i - 1]:
|
| 71 |
+
del self.files[i]
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# -- "File template" methods ---------------------------------------
|
| 75 |
+
|
| 76 |
+
def _parse_template_line(self, line):
|
| 77 |
+
words = line.split()
|
| 78 |
+
action = words[0]
|
| 79 |
+
|
| 80 |
+
patterns = dir = dir_pattern = None
|
| 81 |
+
|
| 82 |
+
if action in ('include', 'exclude',
|
| 83 |
+
'global-include', 'global-exclude'):
|
| 84 |
+
if len(words) < 2:
|
| 85 |
+
raise DistutilsTemplateError(
|
| 86 |
+
"'%s' expects <pattern1> <pattern2> ..." % action)
|
| 87 |
+
patterns = [convert_path(w) for w in words[1:]]
|
| 88 |
+
elif action in ('recursive-include', 'recursive-exclude'):
|
| 89 |
+
if len(words) < 3:
|
| 90 |
+
raise DistutilsTemplateError(
|
| 91 |
+
"'%s' expects <dir> <pattern1> <pattern2> ..." % action)
|
| 92 |
+
dir = convert_path(words[1])
|
| 93 |
+
patterns = [convert_path(w) for w in words[2:]]
|
| 94 |
+
elif action in ('graft', 'prune'):
|
| 95 |
+
if len(words) != 2:
|
| 96 |
+
raise DistutilsTemplateError(
|
| 97 |
+
"'%s' expects a single <dir_pattern>" % action)
|
| 98 |
+
dir_pattern = convert_path(words[1])
|
| 99 |
+
else:
|
| 100 |
+
raise DistutilsTemplateError("unknown action '%s'" % action)
|
| 101 |
+
|
| 102 |
+
return (action, patterns, dir, dir_pattern)
|
| 103 |
+
|
| 104 |
+
def process_template_line(self, line):
|
| 105 |
+
# Parse the line: split it up, make sure the right number of words
|
| 106 |
+
# is there, and return the relevant words. 'action' is always
|
| 107 |
+
# defined: it's the first word of the line. Which of the other
|
| 108 |
+
# three are defined depends on the action; it'll be either
|
| 109 |
+
# patterns, (dir and patterns), or (dir_pattern).
|
| 110 |
+
(action, patterns, dir, dir_pattern) = self._parse_template_line(line)
|
| 111 |
+
|
| 112 |
+
# OK, now we know that the action is valid and we have the
|
| 113 |
+
# right number of words on the line for that action -- so we
|
| 114 |
+
# can proceed with minimal error-checking.
|
| 115 |
+
if action == 'include':
|
| 116 |
+
self.debug_print("include " + ' '.join(patterns))
|
| 117 |
+
for pattern in patterns:
|
| 118 |
+
if not self.include_pattern(pattern, anchor=1):
|
| 119 |
+
log.warn("warning: no files found matching '%s'",
|
| 120 |
+
pattern)
|
| 121 |
+
|
| 122 |
+
elif action == 'exclude':
|
| 123 |
+
self.debug_print("exclude " + ' '.join(patterns))
|
| 124 |
+
for pattern in patterns:
|
| 125 |
+
if not self.exclude_pattern(pattern, anchor=1):
|
| 126 |
+
log.warn(("warning: no previously-included files "
|
| 127 |
+
"found matching '%s'"), pattern)
|
| 128 |
+
|
| 129 |
+
elif action == 'global-include':
|
| 130 |
+
self.debug_print("global-include " + ' '.join(patterns))
|
| 131 |
+
for pattern in patterns:
|
| 132 |
+
if not self.include_pattern(pattern, anchor=0):
|
| 133 |
+
log.warn(("warning: no files found matching '%s' "
|
| 134 |
+
"anywhere in distribution"), pattern)
|
| 135 |
+
|
| 136 |
+
elif action == 'global-exclude':
|
| 137 |
+
self.debug_print("global-exclude " + ' '.join(patterns))
|
| 138 |
+
for pattern in patterns:
|
| 139 |
+
if not self.exclude_pattern(pattern, anchor=0):
|
| 140 |
+
log.warn(("warning: no previously-included files matching "
|
| 141 |
+
"'%s' found anywhere in distribution"),
|
| 142 |
+
pattern)
|
| 143 |
+
|
| 144 |
+
elif action == 'recursive-include':
|
| 145 |
+
self.debug_print("recursive-include %s %s" %
|
| 146 |
+
(dir, ' '.join(patterns)))
|
| 147 |
+
for pattern in patterns:
|
| 148 |
+
if not self.include_pattern(pattern, prefix=dir):
|
| 149 |
+
log.warn(("warning: no files found matching '%s' "
|
| 150 |
+
"under directory '%s'"),
|
| 151 |
+
pattern, dir)
|
| 152 |
+
|
| 153 |
+
elif action == 'recursive-exclude':
|
| 154 |
+
self.debug_print("recursive-exclude %s %s" %
|
| 155 |
+
(dir, ' '.join(patterns)))
|
| 156 |
+
for pattern in patterns:
|
| 157 |
+
if not self.exclude_pattern(pattern, prefix=dir):
|
| 158 |
+
log.warn(("warning: no previously-included files matching "
|
| 159 |
+
"'%s' found under directory '%s'"),
|
| 160 |
+
pattern, dir)
|
| 161 |
+
|
| 162 |
+
elif action == 'graft':
|
| 163 |
+
self.debug_print("graft " + dir_pattern)
|
| 164 |
+
if not self.include_pattern(None, prefix=dir_pattern):
|
| 165 |
+
log.warn("warning: no directories found matching '%s'",
|
| 166 |
+
dir_pattern)
|
| 167 |
+
|
| 168 |
+
elif action == 'prune':
|
| 169 |
+
self.debug_print("prune " + dir_pattern)
|
| 170 |
+
if not self.exclude_pattern(None, prefix=dir_pattern):
|
| 171 |
+
log.warn(("no previously-included directories found "
|
| 172 |
+
"matching '%s'"), dir_pattern)
|
| 173 |
+
else:
|
| 174 |
+
raise DistutilsInternalError(
|
| 175 |
+
"this cannot happen: invalid action '%s'" % action)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
# -- Filtering/selection methods -----------------------------------
|
| 179 |
+
|
| 180 |
+
def include_pattern(self, pattern, anchor=1, prefix=None, is_regex=0):
|
| 181 |
+
"""Select strings (presumably filenames) from 'self.files' that
|
| 182 |
+
match 'pattern', a Unix-style wildcard (glob) pattern. Patterns
|
| 183 |
+
are not quite the same as implemented by the 'fnmatch' module: '*'
|
| 184 |
+
and '?' match non-special characters, where "special" is platform-
|
| 185 |
+
dependent: slash on Unix; colon, slash, and backslash on
|
| 186 |
+
DOS/Windows; and colon on Mac OS.
|
| 187 |
+
|
| 188 |
+
If 'anchor' is true (the default), then the pattern match is more
|
| 189 |
+
stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
|
| 190 |
+
'anchor' is false, both of these will match.
|
| 191 |
+
|
| 192 |
+
If 'prefix' is supplied, then only filenames starting with 'prefix'
|
| 193 |
+
(itself a pattern) and ending with 'pattern', with anything in between
|
| 194 |
+
them, will match. 'anchor' is ignored in this case.
|
| 195 |
+
|
| 196 |
+
If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
|
| 197 |
+
'pattern' is assumed to be either a string containing a regex or a
|
| 198 |
+
regex object -- no translation is done, the regex is just compiled
|
| 199 |
+
and used as-is.
|
| 200 |
+
|
| 201 |
+
Selected strings will be added to self.files.
|
| 202 |
+
|
| 203 |
+
Return True if files are found, False otherwise.
|
| 204 |
+
"""
|
| 205 |
+
# XXX docstring lying about what the special chars are?
|
| 206 |
+
files_found = False
|
| 207 |
+
pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
|
| 208 |
+
self.debug_print("include_pattern: applying regex r'%s'" %
|
| 209 |
+
pattern_re.pattern)
|
| 210 |
+
|
| 211 |
+
# delayed loading of allfiles list
|
| 212 |
+
if self.allfiles is None:
|
| 213 |
+
self.findall()
|
| 214 |
+
|
| 215 |
+
for name in self.allfiles:
|
| 216 |
+
if pattern_re.search(name):
|
| 217 |
+
self.debug_print(" adding " + name)
|
| 218 |
+
self.files.append(name)
|
| 219 |
+
files_found = True
|
| 220 |
+
return files_found
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def exclude_pattern (self, pattern,
|
| 224 |
+
anchor=1, prefix=None, is_regex=0):
|
| 225 |
+
"""Remove strings (presumably filenames) from 'files' that match
|
| 226 |
+
'pattern'. Other parameters are the same as for
|
| 227 |
+
'include_pattern()', above.
|
| 228 |
+
The list 'self.files' is modified in place.
|
| 229 |
+
Return True if files are found, False otherwise.
|
| 230 |
+
"""
|
| 231 |
+
files_found = False
|
| 232 |
+
pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
|
| 233 |
+
self.debug_print("exclude_pattern: applying regex r'%s'" %
|
| 234 |
+
pattern_re.pattern)
|
| 235 |
+
for i in range(len(self.files)-1, -1, -1):
|
| 236 |
+
if pattern_re.search(self.files[i]):
|
| 237 |
+
self.debug_print(" removing " + self.files[i])
|
| 238 |
+
del self.files[i]
|
| 239 |
+
files_found = True
|
| 240 |
+
return files_found
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
# ----------------------------------------------------------------------
|
| 244 |
+
# Utility functions
|
| 245 |
+
|
| 246 |
+
def _find_all_simple(path):
|
| 247 |
+
"""
|
| 248 |
+
Find all files under 'path'
|
| 249 |
+
"""
|
| 250 |
+
results = (
|
| 251 |
+
os.path.join(base, file)
|
| 252 |
+
for base, dirs, files in os.walk(path, followlinks=True)
|
| 253 |
+
for file in files
|
| 254 |
+
)
|
| 255 |
+
return filter(os.path.isfile, results)
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def findall(dir=os.curdir):
|
| 259 |
+
"""
|
| 260 |
+
Find all files under 'dir' and return the list of full filenames.
|
| 261 |
+
Unless dir is '.', return full filenames with dir prepended.
|
| 262 |
+
"""
|
| 263 |
+
files = _find_all_simple(dir)
|
| 264 |
+
if dir == os.curdir:
|
| 265 |
+
make_rel = functools.partial(os.path.relpath, start=dir)
|
| 266 |
+
files = map(make_rel, files)
|
| 267 |
+
return list(files)
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def glob_to_re(pattern):
|
| 271 |
+
"""Translate a shell-like glob pattern to a regular expression; return
|
| 272 |
+
a string containing the regex. Differs from 'fnmatch.translate()' in
|
| 273 |
+
that '*' does not match "special characters" (which are
|
| 274 |
+
platform-specific).
|
| 275 |
+
"""
|
| 276 |
+
pattern_re = fnmatch.translate(pattern)
|
| 277 |
+
|
| 278 |
+
# '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
|
| 279 |
+
# IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
|
| 280 |
+
# and by extension they shouldn't match such "special characters" under
|
| 281 |
+
# any OS. So change all non-escaped dots in the RE to match any
|
| 282 |
+
# character except the special characters (currently: just os.sep).
|
| 283 |
+
sep = os.sep
|
| 284 |
+
if os.sep == '\\':
|
| 285 |
+
# we're using a regex to manipulate a regex, so we need
|
| 286 |
+
# to escape the backslash twice
|
| 287 |
+
sep = r'\\\\'
|
| 288 |
+
escaped = r'\1[^%s]' % sep
|
| 289 |
+
pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
|
| 290 |
+
return pattern_re
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
def translate_pattern(pattern, anchor=1, prefix=None, is_regex=0):
|
| 294 |
+
"""Translate a shell-like wildcard pattern to a compiled regular
|
| 295 |
+
expression. Return the compiled regex. If 'is_regex' true,
|
| 296 |
+
then 'pattern' is directly compiled to a regex (if it's a string)
|
| 297 |
+
or just returned as-is (assumes it's a regex object).
|
| 298 |
+
"""
|
| 299 |
+
if is_regex:
|
| 300 |
+
if isinstance(pattern, str):
|
| 301 |
+
return re.compile(pattern)
|
| 302 |
+
else:
|
| 303 |
+
return pattern
|
| 304 |
+
|
| 305 |
+
# ditch start and end characters
|
| 306 |
+
start, _, end = glob_to_re('_').partition('_')
|
| 307 |
+
|
| 308 |
+
if pattern:
|
| 309 |
+
pattern_re = glob_to_re(pattern)
|
| 310 |
+
assert pattern_re.startswith(start) and pattern_re.endswith(end)
|
| 311 |
+
else:
|
| 312 |
+
pattern_re = ''
|
| 313 |
+
|
| 314 |
+
if prefix is not None:
|
| 315 |
+
prefix_re = glob_to_re(prefix)
|
| 316 |
+
assert prefix_re.startswith(start) and prefix_re.endswith(end)
|
| 317 |
+
prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
|
| 318 |
+
sep = os.sep
|
| 319 |
+
if os.sep == '\\':
|
| 320 |
+
sep = r'\\'
|
| 321 |
+
pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
|
| 322 |
+
pattern_re = r'%s\A%s%s.*%s%s' % (start, prefix_re, sep, pattern_re, end)
|
| 323 |
+
else: # no prefix -- respect anchor flag
|
| 324 |
+
if anchor:
|
| 325 |
+
pattern_re = r'%s\A%s' % (start, pattern_re[len(start):])
|
| 326 |
+
|
| 327 |
+
return re.compile(pattern_re)
|
evalkit_llava/lib/python3.10/distutils/log.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""A simple log mechanism styled after PEP 282."""
|
| 2 |
+
|
| 3 |
+
# The class here is styled after PEP 282 so that it could later be
|
| 4 |
+
# replaced with a standard Python logging implementation.
|
| 5 |
+
|
| 6 |
+
DEBUG = 1
|
| 7 |
+
INFO = 2
|
| 8 |
+
WARN = 3
|
| 9 |
+
ERROR = 4
|
| 10 |
+
FATAL = 5
|
| 11 |
+
|
| 12 |
+
import sys
|
| 13 |
+
|
| 14 |
+
class Log:
|
| 15 |
+
|
| 16 |
+
def __init__(self, threshold=WARN):
|
| 17 |
+
self.threshold = threshold
|
| 18 |
+
|
| 19 |
+
def _log(self, level, msg, args):
|
| 20 |
+
if level not in (DEBUG, INFO, WARN, ERROR, FATAL):
|
| 21 |
+
raise ValueError('%s wrong log level' % str(level))
|
| 22 |
+
|
| 23 |
+
if level >= self.threshold:
|
| 24 |
+
if args:
|
| 25 |
+
msg = msg % args
|
| 26 |
+
if level in (WARN, ERROR, FATAL):
|
| 27 |
+
stream = sys.stderr
|
| 28 |
+
else:
|
| 29 |
+
stream = sys.stdout
|
| 30 |
+
try:
|
| 31 |
+
stream.write('%s\n' % msg)
|
| 32 |
+
except UnicodeEncodeError:
|
| 33 |
+
# emulate backslashreplace error handler
|
| 34 |
+
encoding = stream.encoding
|
| 35 |
+
msg = msg.encode(encoding, "backslashreplace").decode(encoding)
|
| 36 |
+
stream.write('%s\n' % msg)
|
| 37 |
+
stream.flush()
|
| 38 |
+
|
| 39 |
+
def log(self, level, msg, *args):
|
| 40 |
+
self._log(level, msg, args)
|
| 41 |
+
|
| 42 |
+
def debug(self, msg, *args):
|
| 43 |
+
self._log(DEBUG, msg, args)
|
| 44 |
+
|
| 45 |
+
def info(self, msg, *args):
|
| 46 |
+
self._log(INFO, msg, args)
|
| 47 |
+
|
| 48 |
+
def warn(self, msg, *args):
|
| 49 |
+
self._log(WARN, msg, args)
|
| 50 |
+
|
| 51 |
+
def error(self, msg, *args):
|
| 52 |
+
self._log(ERROR, msg, args)
|
| 53 |
+
|
| 54 |
+
def fatal(self, msg, *args):
|
| 55 |
+
self._log(FATAL, msg, args)
|
| 56 |
+
|
| 57 |
+
_global_log = Log()
|
| 58 |
+
log = _global_log.log
|
| 59 |
+
debug = _global_log.debug
|
| 60 |
+
info = _global_log.info
|
| 61 |
+
warn = _global_log.warn
|
| 62 |
+
error = _global_log.error
|
| 63 |
+
fatal = _global_log.fatal
|
| 64 |
+
|
| 65 |
+
def set_threshold(level):
|
| 66 |
+
# return the old threshold for use from tests
|
| 67 |
+
old = _global_log.threshold
|
| 68 |
+
_global_log.threshold = level
|
| 69 |
+
return old
|
| 70 |
+
|
| 71 |
+
def set_verbosity(v):
|
| 72 |
+
if v <= 0:
|
| 73 |
+
set_threshold(WARN)
|
| 74 |
+
elif v == 1:
|
| 75 |
+
set_threshold(INFO)
|
| 76 |
+
elif v >= 2:
|
| 77 |
+
set_threshold(DEBUG)
|
evalkit_llava/lib/python3.10/distutils/msvccompiler.py
ADDED
|
@@ -0,0 +1,643 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""distutils.msvccompiler
|
| 2 |
+
|
| 3 |
+
Contains MSVCCompiler, an implementation of the abstract CCompiler class
|
| 4 |
+
for the Microsoft Visual Studio.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
# Written by Perry Stoll
|
| 8 |
+
# hacked by Robin Becker and Thomas Heller to do a better job of
|
| 9 |
+
# finding DevStudio (through the registry)
|
| 10 |
+
|
| 11 |
+
import sys, os
|
| 12 |
+
from distutils.errors import \
|
| 13 |
+
DistutilsExecError, DistutilsPlatformError, \
|
| 14 |
+
CompileError, LibError, LinkError
|
| 15 |
+
from distutils.ccompiler import \
|
| 16 |
+
CCompiler, gen_lib_options
|
| 17 |
+
from distutils import log
|
| 18 |
+
|
| 19 |
+
_can_read_reg = False
|
| 20 |
+
try:
|
| 21 |
+
import winreg
|
| 22 |
+
|
| 23 |
+
_can_read_reg = True
|
| 24 |
+
hkey_mod = winreg
|
| 25 |
+
|
| 26 |
+
RegOpenKeyEx = winreg.OpenKeyEx
|
| 27 |
+
RegEnumKey = winreg.EnumKey
|
| 28 |
+
RegEnumValue = winreg.EnumValue
|
| 29 |
+
RegError = winreg.error
|
| 30 |
+
|
| 31 |
+
except ImportError:
|
| 32 |
+
try:
|
| 33 |
+
import win32api
|
| 34 |
+
import win32con
|
| 35 |
+
_can_read_reg = True
|
| 36 |
+
hkey_mod = win32con
|
| 37 |
+
|
| 38 |
+
RegOpenKeyEx = win32api.RegOpenKeyEx
|
| 39 |
+
RegEnumKey = win32api.RegEnumKey
|
| 40 |
+
RegEnumValue = win32api.RegEnumValue
|
| 41 |
+
RegError = win32api.error
|
| 42 |
+
except ImportError:
|
| 43 |
+
log.info("Warning: Can't read registry to find the "
|
| 44 |
+
"necessary compiler setting\n"
|
| 45 |
+
"Make sure that Python modules winreg, "
|
| 46 |
+
"win32api or win32con are installed.")
|
| 47 |
+
pass
|
| 48 |
+
|
| 49 |
+
if _can_read_reg:
|
| 50 |
+
HKEYS = (hkey_mod.HKEY_USERS,
|
| 51 |
+
hkey_mod.HKEY_CURRENT_USER,
|
| 52 |
+
hkey_mod.HKEY_LOCAL_MACHINE,
|
| 53 |
+
hkey_mod.HKEY_CLASSES_ROOT)
|
| 54 |
+
|
| 55 |
+
def read_keys(base, key):
    """Return list of registry keys.

    Opens *key* under registry root *base* and enumerates subkey names
    until the registry API signals exhaustion; returns None if the key
    cannot be opened at all.
    """
    try:
        handle = RegOpenKeyEx(base, key)
    except RegError:
        return None
    subkeys = []
    index = 0
    while True:
        try:
            subkeys.append(RegEnumKey(handle, index))
        except RegError:
            # Enumeration past the last subkey raises; stop there.
            break
        index += 1
    return subkeys
|
| 71 |
+
|
| 72 |
+
def read_values(base, key):
    """Return dict of registry keys and values.

    All names are converted to lowercase.  Returns None when *key*
    cannot be opened under root *base*.
    """
    try:
        handle = RegOpenKeyEx(base, key)
    except RegError:
        return None
    values = {}
    index = 0
    while True:
        try:
            name, value, value_type = RegEnumValue(handle, index)
        except RegError:
            # Ran past the last value; enumeration is complete.
            break
        values[convert_mbcs(name.lower())] = convert_mbcs(value)
        index += 1
    return values
|
| 92 |
+
|
| 93 |
+
def convert_mbcs(s):
    """Decode *s* from the Windows "mbcs" codec when possible.

    Objects without a ``decode`` method (e.g. ``str``) and values that
    fail to decode are returned unchanged.
    """
    decode = getattr(s, "decode", None)
    if decode is None:
        return s
    try:
        return decode("mbcs")
    except UnicodeError:
        return s
|
| 101 |
+
|
| 102 |
+
class MacroExpander:
    """Expand Visual Studio "$(Name)" macros found in registry path values.

    The expansion table is populated from the Windows registry for the
    given Visual Studio *version* at construction time.
    """

    def __init__(self, version):
        # Maps "$(MacroName)" -> replacement text; filled by load_macros().
        self.macros = {}
        self.load_macros(version)

    def set_macro(self, macro, path, key):
        # Probe every registry root in HKEYS order; the first hive that
        # provides the value wins.  A missing *key* in a found hive
        # raises KeyError (relied on by load_macros below).
        for base in HKEYS:
            d = read_values(base, path)
            if d:
                self.macros["$(%s)" % macro] = d[key]
                break

    def load_macros(self, version):
        """Read VC/VS/.NET install locations from the registry into self.macros."""
        vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version
        self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir")
        self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir")
        net = r"Software\Microsoft\.NETFramework"
        self.set_macro("FrameworkDir", net, "installroot")
        try:
            if version > 7.0:
                self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1")
            else:
                self.set_macro("FrameworkSDKDir", net, "sdkinstallroot")
        except KeyError as exc:  # SDK entry missing: VS 2003 is not installed
            raise DistutilsPlatformError(
            """Python was built with Visual Studio 2003;
extensions must be built with a compiler than can generate compatible binaries.
Visual Studio 2003 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")

        # Record the installed .NET Framework version for $(FrameworkVersion).
        p = r"Software\Microsoft\NET Framework Setup\Product"
        for base in HKEYS:
            try:
                h = RegOpenKeyEx(base, p)
            except RegError:
                continue
            key = RegEnumKey(h, 0)
            d = read_values(base, r"%s\%s" % (p, key))
            self.macros["$(FrameworkVersion)"] = d["version"]

    def sub(self, s):
        """Return *s* with every known "$(Macro)" replaced by its value."""
        for k, v in self.macros.items():
            s = s.replace(k, v)
        return s
|
| 146 |
+
|
| 147 |
+
def get_build_version():
    """Return the version of MSVC that was used to build Python.

    For Python 2.3 and up, the version number is included in
    sys.version.  For earlier versions, assume the compiler is MSVC 6.
    """
    marker = "MSC v."
    start = sys.version.find(marker)
    if start == -1:
        return 6
    start += len(marker)
    version_str, _rest = sys.version[start:].split(" ", 1)
    major = int(version_str[:-2]) - 6
    if major >= 13:
        # v13 was skipped and should be v14
        major += 1
    minor = int(version_str[2:3]) / 10.0
    # I don't think paths are affected by minor version in version 6
    if major == 6:
        minor = 0
    if major >= 6:
        return major + minor
    # else we don't know what version of the compiler this is
    return None
|
| 171 |
+
|
| 172 |
+
def get_build_architecture():
    """Return the processor architecture.

    Possible results are "Intel" or "AMD64".
    """
    marker = " bit ("
    start = sys.version.find(marker)
    if start == -1:
        # No compiler banner in sys.version: assume a 32-bit Intel build.
        return "Intel"
    end = sys.version.find(")", start)
    return sys.version[start + len(marker):end]
|
| 184 |
+
|
| 185 |
+
def normalize_and_reduce_paths(paths):
    """Return a list of normalized paths with duplicates removed.

    The current order of paths is maintained.
    """
    # Paths are normalized so things like:  /a and /a/  aren't both preserved.
    reduced_paths = []
    seen = set()  # O(1) membership test instead of the old O(n) list scan
    for p in paths:
        np = os.path.normpath(p)
        if np not in seen:
            seen.add(np)
            reduced_paths.append(np)
    return reduced_paths
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
class MSVCCompiler(CCompiler) :
    """Concrete class that implements an interface to Microsoft Visual C++,
    as defined by the CCompiler abstract class."""

    compiler_type = 'msvc'

    # Just set this so CCompiler's constructor doesn't barf.  We currently
    # don't use the 'set_executables()' bureaucracy provided by CCompiler,
    # as it really isn't necessary for this sort of single-compiler class.
    # Would be nice to have a consistent interface with UnixCCompiler,
    # though, so it's worth thinking about.
    executables = {}

    # Private class data (need to distinguish C from C++ source for compiler)
    _c_extensions = ['.c']
    _cpp_extensions = ['.cc', '.cpp', '.cxx']
    _rc_extensions = ['.rc']
    _mc_extensions = ['.mc']

    # Needed for the filename generation methods provided by the
    # base class, CCompiler.
    src_extensions = (_c_extensions + _cpp_extensions +
                      _rc_extensions + _mc_extensions)
    res_extension = '.res'
    obj_extension = '.obj'
    static_lib_extension = '.lib'
    shared_lib_extension = '.dll'
    static_lib_format = shared_lib_format = '%s%s'
    exe_extension = '.exe'

    def __init__(self, verbose=0, dry_run=0, force=0):
        # Determine which MSVC built this interpreter so we can locate a
        # matching toolchain; actual tool lookup is deferred to initialize().
        CCompiler.__init__ (self, verbose, dry_run, force)
        self.__version = get_build_version()
        self.__arch = get_build_architecture()
        if self.__arch == "Intel":
            # x86
            if self.__version >= 7:
                self.__root = r"Software\Microsoft\VisualStudio"
                self.__macros = MacroExpander(self.__version)
            else:
                self.__root = r"Software\Microsoft\Devstudio"
                self.__product = "Visual Studio version %s" % self.__version
        else:
            # Win64. Assume this was built with the platform SDK
            self.__product = "Microsoft SDK compiler %s" % (self.__version + 6)

        self.initialized = False

    def initialize(self):
        """Locate the MSVC tools and compute compile/link option sets.

        Called lazily by compile()/link()/create_static_lib() so that
        merely constructing the compiler object does not touch the
        registry or environment.
        """
        self.__paths = []
        if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
            # Assume that the SDK set up everything alright; don't try to be
            # smarter
            self.cc = "cl.exe"
            self.linker = "link.exe"
            self.lib = "lib.exe"
            self.rc = "rc.exe"
            self.mc = "mc.exe"
        else:
            self.__paths = self.get_msvc_paths("path")

            if len(self.__paths) == 0:
                raise DistutilsPlatformError("Python was built with %s, "
                       "and extensions need to be built with the same "
                       "version of the compiler, but it isn't installed."
                       % self.__product)

            self.cc = self.find_exe("cl.exe")
            self.linker = self.find_exe("link.exe")
            self.lib = self.find_exe("lib.exe")
            self.rc = self.find_exe("rc.exe")   # resource compiler
            self.mc = self.find_exe("mc.exe")   # message compiler
            self.set_path_env_var('lib')
            self.set_path_env_var('include')

        # extend the MSVC path with the current path
        try:
            for p in os.environ['path'].split(';'):
                self.__paths.append(p)
        except KeyError:
            pass
        self.__paths = normalize_and_reduce_paths(self.__paths)
        os.environ['path'] = ";".join(self.__paths)

        self.preprocess_options = None
        # Option sets differ per architecture (e.g. /GX vs /GS-).
        if self.__arch == "Intel":
            self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GX' ,
                                     '/DNDEBUG']
            self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX',
                                          '/Z7', '/D_DEBUG']
        else:
            # Win64
            self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
                                     '/DNDEBUG']
            self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
                                          '/Z7', '/D_DEBUG']

        self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
        if self.__version >= 7:
            self.ldflags_shared_debug = [
                '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'
                ]
        else:
            self.ldflags_shared_debug = [
                '/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG'
                ]
        self.ldflags_static = [ '/nologo']

        self.initialized = True

    # -- Worker methods ------------------------------------------------

    def object_filenames(self,
                         source_filenames,
                         strip_dir=0,
                         output_dir=''):
        """Map source filenames to the object filenames MSVC will produce."""
        # Copied from ccompiler.py, extended to return .res as 'object'-file
        # for .rc input file
        if output_dir is None: output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            (base, ext) = os.path.splitext (src_name)
            base = os.path.splitdrive(base)[1] # Chop off the drive
            base = base[os.path.isabs(base):]  # If abs, chop off leading /
            if ext not in self.src_extensions:
                # Better to raise an exception instead of silently continuing
                # and later complain about sources and targets having
                # different lengths
                raise CompileError ("Don't know how to compile %s" % src_name)
            if strip_dir:
                base = os.path.basename (base)
            if ext in self._rc_extensions:
                obj_names.append (os.path.join (output_dir,
                                                base + self.res_extension))
            elif ext in self._mc_extensions:
                obj_names.append (os.path.join (output_dir,
                                                base + self.res_extension))
            else:
                obj_names.append (os.path.join (output_dir,
                                                base + self.obj_extension))
        return obj_names


    def compile(self, sources,
                output_dir=None, macros=None, include_dirs=None, debug=0,
                extra_preargs=None, extra_postargs=None, depends=None):
        """Compile each source file to an object (or resource) file.

        .c/.cpp sources go through cl.exe; .rc through rc.exe; .mc is
        compiled to .rc with mc.exe and then to .res.  Raises
        CompileError when a spawned tool fails.
        """
        if not self.initialized:
            self.initialize()
        compile_info = self._setup_compile(output_dir, macros, include_dirs,
                                           sources, depends, extra_postargs)
        macros, objects, extra_postargs, pp_opts, build = compile_info

        compile_opts = extra_preargs or []
        compile_opts.append ('/c')
        if debug:
            compile_opts.extend(self.compile_options_debug)
        else:
            compile_opts.extend(self.compile_options)

        for obj in objects:
            try:
                src, ext = build[obj]
            except KeyError:
                # Object not scheduled for rebuild by _setup_compile.
                continue
            if debug:
                # pass the full pathname to MSVC in debug mode,
                # this allows the debugger to find the source file
                # without asking the user to browse for it
                src = os.path.abspath(src)

            if ext in self._c_extensions:
                input_opt = "/Tc" + src
            elif ext in self._cpp_extensions:
                input_opt = "/Tp" + src
            elif ext in self._rc_extensions:
                # compile .RC to .RES file
                input_opt = src
                output_opt = "/fo" + obj
                try:
                    self.spawn([self.rc] + pp_opts +
                               [output_opt] + [input_opt])
                except DistutilsExecError as msg:
                    raise CompileError(msg)
                continue
            elif ext in self._mc_extensions:
                # Compile .MC to .RC file to .RES file.
                #   * '-h dir' specifies the directory for the
                #     generated include file
                #   * '-r dir' specifies the target directory of the
                #     generated RC file and the binary message resource
                #     it includes
                #
                # For now (since there are no options to change this),
                # we use the source-directory for the include file and
                # the build directory for the RC file and message
                # resources. This works at least for win32all.
                h_dir = os.path.dirname(src)
                rc_dir = os.path.dirname(obj)
                try:
                    # first compile .MC to .RC and .H file
                    self.spawn([self.mc] +
                               ['-h', h_dir, '-r', rc_dir] + [src])
                    base, _ = os.path.splitext (os.path.basename (src))
                    rc_file = os.path.join (rc_dir, base + '.rc')
                    # then compile .RC to .RES file
                    self.spawn([self.rc] +
                               ["/fo" + obj] + [rc_file])

                except DistutilsExecError as msg:
                    raise CompileError(msg)
                continue
            else:
                # how to handle this file?
                raise CompileError("Don't know how to compile %s to %s"
                                   % (src, obj))

            output_opt = "/Fo" + obj
            try:
                self.spawn([self.cc] + compile_opts + pp_opts +
                           [input_opt, output_opt] +
                           extra_postargs)
            except DistutilsExecError as msg:
                raise CompileError(msg)

        return objects


    def create_static_lib(self,
                          objects,
                          output_libname,
                          output_dir=None,
                          debug=0,
                          target_lang=None):
        """Bundle *objects* into a static .lib with lib.exe.

        Raises LibError if the librarian fails; skips the step when the
        output is already up to date.
        """
        if not self.initialized:
            self.initialize()
        (objects, output_dir) = self._fix_object_args(objects, output_dir)
        output_filename = self.library_filename(output_libname,
                                                output_dir=output_dir)

        if self._need_link(objects, output_filename):
            lib_args = objects + ['/OUT:' + output_filename]
            if debug:
                pass # XXX what goes here?
            try:
                self.spawn([self.lib] + lib_args)
            except DistutilsExecError as msg:
                raise LibError(msg)
        else:
            log.debug("skipping %s (up-to-date)", output_filename)


    def link(self,
             target_desc,
             objects,
             output_filename,
             output_dir=None,
             libraries=None,
             library_dirs=None,
             runtime_library_dirs=None,
             export_symbols=None,
             debug=0,
             extra_preargs=None,
             extra_postargs=None,
             build_temp=None,
             target_lang=None):
        """Link object files into an executable or DLL with link.exe.

        Raises LinkError when the linker fails; skips the step when the
        output is already up to date.
        """
        if not self.initialized:
            self.initialize()
        (objects, output_dir) = self._fix_object_args(objects, output_dir)
        fixed_args = self._fix_lib_args(libraries, library_dirs,
                                        runtime_library_dirs)
        (libraries, library_dirs, runtime_library_dirs) = fixed_args

        if runtime_library_dirs:
            self.warn ("I don't know what to do with 'runtime_library_dirs': "
                       + str (runtime_library_dirs))

        lib_opts = gen_lib_options(self,
                                   library_dirs, runtime_library_dirs,
                                   libraries)
        if output_dir is not None:
            output_filename = os.path.join(output_dir, output_filename)

        if self._need_link(objects, output_filename):
            if target_desc == CCompiler.EXECUTABLE:
                # Executables drop the leading '/DLL' flag.
                if debug:
                    ldflags = self.ldflags_shared_debug[1:]
                else:
                    ldflags = self.ldflags_shared[1:]
            else:
                if debug:
                    ldflags = self.ldflags_shared_debug
                else:
                    ldflags = self.ldflags_shared

            export_opts = []
            for sym in (export_symbols or []):
                export_opts.append("/EXPORT:" + sym)

            ld_args = (ldflags + lib_opts + export_opts +
                       objects + ['/OUT:' + output_filename])

            # The MSVC linker generates .lib and .exp files, which cannot be
            # suppressed by any linker switches. The .lib files may even be
            # needed! Make sure they are generated in the temporary build
            # directory. Since they have different names for debug and release
            # builds, they can go into the same directory.
            if export_symbols is not None:
                (dll_name, dll_ext) = os.path.splitext(
                    os.path.basename(output_filename))
                implib_file = os.path.join(
                    os.path.dirname(objects[0]),
                    self.library_filename(dll_name))
                ld_args.append ('/IMPLIB:' + implib_file)

            if extra_preargs:
                ld_args[:0] = extra_preargs
            if extra_postargs:
                ld_args.extend(extra_postargs)

            self.mkpath(os.path.dirname(output_filename))
            try:
                self.spawn([self.linker] + ld_args)
            except DistutilsExecError as msg:
                raise LinkError(msg)

        else:
            log.debug("skipping %s (up-to-date)", output_filename)


    # -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options() function, in
    # ccompiler.py.

    def library_dir_option(self, dir):
        """Return the linker option for adding *dir* to the library search path."""
        return "/LIBPATH:" + dir

    def runtime_library_dir_option(self, dir):
        # MSVC has no rpath equivalent; callers must not request one.
        raise DistutilsPlatformError(
              "don't know how to set runtime library search path for MSVC++")

    def library_option(self, lib):
        """Return the linker argument naming library *lib*."""
        return self.library_filename(lib)


    def find_library_file(self, dirs, lib, debug=0):
        """Search *dirs* for *lib*; return its full path or None.

        Prefer a debugging library if found (and requested), but deal
        with it if we don't have one.
        """
        if debug:
            try_names = [lib + "_d", lib]
        else:
            try_names = [lib]
        for dir in dirs:
            for name in try_names:
                libfile = os.path.join(dir, self.library_filename (name))
                if os.path.exists(libfile):
                    return libfile
        else:
            # Oops, didn't find it in *any* of 'dirs'
            return None

    # Helper methods for using the MSVC registry settings

    def find_exe(self, exe):
        """Return path to an MSVC executable program.

        Tries to find the program in several places: first, one of the
        MSVC program search paths from the registry; next, the directories
        in the PATH environment variable.  If any of those work, return an
        absolute path that is known to exist.  If none of them work, just
        return the original program name, 'exe'.
        """
        for p in self.__paths:
            fn = os.path.join(os.path.abspath(p), exe)
            if os.path.isfile(fn):
                return fn

        # didn't find it; try existing path
        for p in os.environ['Path'].split(';'):
            fn = os.path.join(os.path.abspath(p),exe)
            if os.path.isfile(fn):
                return fn

        return exe

    def get_msvc_paths(self, path, platform='x86'):
        """Get a list of devstudio directories (include, lib or path).

        Return a list of strings.  The list will be empty if unable to
        access the registry or appropriate registry keys not found.
        """
        if not _can_read_reg:
            return []

        path = path + " dirs"
        if self.__version >= 7:
            key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories"
                   % (self.__root, self.__version))
        else:
            key = (r"%s\6.0\Build System\Components\Platforms"
                   r"\Win32 (%s)\Directories" % (self.__root, platform))

        for base in HKEYS:
            d = read_values(base, key)
            if d:
                if self.__version >= 7:
                    # VS7+ values may embed "$(...)" macros; expand them.
                    return self.__macros.sub(d[path]).split(";")
                else:
                    return d[path].split(";")
        # MSVC 6 seems to create the registry entries we need only when
        # the GUI is run.
        if self.__version == 6:
            for base in HKEYS:
                if read_values(base, r"%s\6.0" % self.__root) is not None:
                    self.warn("It seems you have Visual Studio 6 installed, "
                        "but the expected registry settings are not present.\n"
                        "You must at least run the Visual Studio GUI once "
                        "so that these entries are created.")
                    break
        return []

    def set_path_env_var(self, name):
        """Set environment variable 'name' to an MSVC path type value.

        This is equivalent to a SET command prior to execution of spawned
        commands.
        """
        if name == "lib":
            # The registry calls the lib directories "library dirs".
            p = self.get_msvc_paths("library")
        else:
            p = self.get_msvc_paths(name)
        if p:
            os.environ[name] = ';'.join(p)
|
| 636 |
+
|
| 637 |
+
|
| 638 |
+
if get_build_version() >= 8.0:
|
| 639 |
+
log.debug("Importing new compiler from distutils.msvc9compiler")
|
| 640 |
+
OldMSVCCompiler = MSVCCompiler
|
| 641 |
+
from distutils.msvc9compiler import MSVCCompiler
|
| 642 |
+
# get_build_architecture not really relevant now we support cross-compile
|
| 643 |
+
from distutils.msvc9compiler import MacroExpander
|
evalkit_llava/lib/python3.10/distutils/sysconfig.py
ADDED
|
@@ -0,0 +1,353 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Provide access to Python's configuration information. The specific
|
| 2 |
+
configuration variables available depend heavily on the platform and
|
| 3 |
+
configuration. The values may be retrieved using
|
| 4 |
+
get_config_var(name), and the list of variables is available via
|
| 5 |
+
get_config_vars().keys(). Additional convenience functions are also
|
| 6 |
+
available.
|
| 7 |
+
|
| 8 |
+
Written by: Fred L. Drake, Jr.
|
| 9 |
+
Email: <fdrake@acm.org>
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import _imp
|
| 13 |
+
import os
|
| 14 |
+
import re
|
| 15 |
+
import sys
|
| 16 |
+
import warnings
|
| 17 |
+
|
| 18 |
+
from functools import partial
|
| 19 |
+
|
| 20 |
+
from .errors import DistutilsPlatformError
|
| 21 |
+
|
| 22 |
+
from sysconfig import (
|
| 23 |
+
_PREFIX as PREFIX,
|
| 24 |
+
_BASE_PREFIX as BASE_PREFIX,
|
| 25 |
+
_EXEC_PREFIX as EXEC_PREFIX,
|
| 26 |
+
_BASE_EXEC_PREFIX as BASE_EXEC_PREFIX,
|
| 27 |
+
_PROJECT_BASE as project_base,
|
| 28 |
+
_PYTHON_BUILD as python_build,
|
| 29 |
+
_init_posix as sysconfig_init_posix,
|
| 30 |
+
parse_config_h as sysconfig_parse_config_h,
|
| 31 |
+
|
| 32 |
+
_init_non_posix,
|
| 33 |
+
_is_python_source_dir,
|
| 34 |
+
_sys_home,
|
| 35 |
+
|
| 36 |
+
_variable_rx,
|
| 37 |
+
_findvar1_rx,
|
| 38 |
+
_findvar2_rx,
|
| 39 |
+
|
| 40 |
+
expand_makefile_vars,
|
| 41 |
+
is_python_build,
|
| 42 |
+
get_config_h_filename,
|
| 43 |
+
get_config_var,
|
| 44 |
+
get_config_vars,
|
| 45 |
+
get_makefile_filename,
|
| 46 |
+
get_python_version,
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
# This is better than
|
| 50 |
+
# from sysconfig import _CONFIG_VARS as _config_vars
|
| 51 |
+
# because it makes sure that the global dictionary is initialized
|
| 52 |
+
# which might not be true in the time of import.
|
| 53 |
+
_config_vars = get_config_vars()
|
| 54 |
+
|
| 55 |
+
if os.name == "nt":
|
| 56 |
+
from sysconfig import _fix_pcbuild
|
| 57 |
+
|
| 58 |
+
warnings.warn(
|
| 59 |
+
'The distutils.sysconfig module is deprecated, use sysconfig instead',
|
| 60 |
+
DeprecationWarning,
|
| 61 |
+
stacklevel=2
|
| 62 |
+
)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# Following functions are the same as in sysconfig but with different API
|
| 66 |
+
def parse_config_h(fp, g=None):
|
| 67 |
+
return sysconfig_parse_config_h(fp, vars=g)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
_python_build = partial(is_python_build, check_home=True)
|
| 71 |
+
_init_posix = partial(sysconfig_init_posix, _config_vars)
|
| 72 |
+
_init_nt = partial(_init_non_posix, _config_vars)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
# Similar function is also implemented in sysconfig as _parse_makefile
|
| 76 |
+
# but without the parsing capabilities of distutils.text_file.TextFile.
|
| 77 |
+
def parse_makefile(fn, g=None):
|
| 78 |
+
"""Parse a Makefile-style file.
|
| 79 |
+
A dictionary containing name/value pairs is returned. If an
|
| 80 |
+
optional dictionary is passed in as the second argument, it is
|
| 81 |
+
used instead of a new dictionary.
|
| 82 |
+
"""
|
| 83 |
+
from distutils.text_file import TextFile
|
| 84 |
+
fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape")
|
| 85 |
+
|
| 86 |
+
if g is None:
|
| 87 |
+
g = {}
|
| 88 |
+
done = {}
|
| 89 |
+
notdone = {}
|
| 90 |
+
|
| 91 |
+
while True:
|
| 92 |
+
line = fp.readline()
|
| 93 |
+
if line is None: # eof
|
| 94 |
+
break
|
| 95 |
+
m = re.match(_variable_rx, line)
|
| 96 |
+
if m:
|
| 97 |
+
n, v = m.group(1, 2)
|
| 98 |
+
v = v.strip()
|
| 99 |
+
# `$$' is a literal `$' in make
|
| 100 |
+
tmpv = v.replace('$$', '')
|
| 101 |
+
|
| 102 |
+
if "$" in tmpv:
|
| 103 |
+
notdone[n] = v
|
| 104 |
+
else:
|
| 105 |
+
try:
|
| 106 |
+
v = int(v)
|
| 107 |
+
except ValueError:
|
| 108 |
+
# insert literal `$'
|
| 109 |
+
done[n] = v.replace('$$', '$')
|
| 110 |
+
else:
|
| 111 |
+
done[n] = v
|
| 112 |
+
|
| 113 |
+
# Variables with a 'PY_' prefix in the makefile. These need to
|
| 114 |
+
# be made available without that prefix through sysconfig.
|
| 115 |
+
# Special care is needed to ensure that variable expansion works, even
|
| 116 |
+
# if the expansion uses the name without a prefix.
|
| 117 |
+
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
|
| 118 |
+
|
| 119 |
+
# do variable interpolation here
|
| 120 |
+
while notdone:
|
| 121 |
+
for name in list(notdone):
|
| 122 |
+
value = notdone[name]
|
| 123 |
+
m = re.search(_findvar1_rx, value) or re.search(_findvar2_rx, value)
|
| 124 |
+
if m:
|
| 125 |
+
n = m.group(1)
|
| 126 |
+
found = True
|
| 127 |
+
if n in done:
|
| 128 |
+
item = str(done[n])
|
| 129 |
+
elif n in notdone:
|
| 130 |
+
# get it on a subsequent round
|
| 131 |
+
found = False
|
| 132 |
+
elif n in os.environ:
|
| 133 |
+
# do it like make: fall back to environment
|
| 134 |
+
item = os.environ[n]
|
| 135 |
+
|
| 136 |
+
elif n in renamed_variables:
|
| 137 |
+
if name.startswith('PY_') and name[3:] in renamed_variables:
|
| 138 |
+
item = ""
|
| 139 |
+
|
| 140 |
+
elif 'PY_' + n in notdone:
|
| 141 |
+
found = False
|
| 142 |
+
|
| 143 |
+
else:
|
| 144 |
+
item = str(done['PY_' + n])
|
| 145 |
+
else:
|
| 146 |
+
done[n] = item = ""
|
| 147 |
+
if found:
|
| 148 |
+
after = value[m.end():]
|
| 149 |
+
value = value[:m.start()] + item + after
|
| 150 |
+
if "$" in after:
|
| 151 |
+
notdone[name] = value
|
| 152 |
+
else:
|
| 153 |
+
try: value = int(value)
|
| 154 |
+
except ValueError:
|
| 155 |
+
done[name] = value.strip()
|
| 156 |
+
else:
|
| 157 |
+
done[name] = value
|
| 158 |
+
del notdone[name]
|
| 159 |
+
|
| 160 |
+
if name.startswith('PY_') \
|
| 161 |
+
and name[3:] in renamed_variables:
|
| 162 |
+
|
| 163 |
+
name = name[3:]
|
| 164 |
+
if name not in done:
|
| 165 |
+
done[name] = value
|
| 166 |
+
else:
|
| 167 |
+
# bogus variable reference; just drop it since we can't deal
|
| 168 |
+
del notdone[name]
|
| 169 |
+
|
| 170 |
+
fp.close()
|
| 171 |
+
|
| 172 |
+
# strip spurious spaces
|
| 173 |
+
for k, v in done.items():
|
| 174 |
+
if isinstance(v, str):
|
| 175 |
+
done[k] = v.strip()
|
| 176 |
+
|
| 177 |
+
# save the results in the global dictionary
|
| 178 |
+
g.update(done)
|
| 179 |
+
return g
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
# Following functions are deprecated together with this module and they
|
| 183 |
+
# have no direct replacement
|
| 184 |
+
|
| 185 |
+
# Calculate the build qualifier flags if they are defined. Adding the flags
|
| 186 |
+
# to the include and lib directories only makes sense for an installation, not
|
| 187 |
+
# an in-source build.
|
| 188 |
+
build_flags = ''
|
| 189 |
+
try:
|
| 190 |
+
if not python_build:
|
| 191 |
+
build_flags = sys.abiflags
|
| 192 |
+
except AttributeError:
|
| 193 |
+
# It's not a configure-based build, so the sys module doesn't have
|
| 194 |
+
# this attribute, which is fine.
|
| 195 |
+
pass
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def customize_compiler(compiler):
|
| 199 |
+
"""Do any platform-specific customization of a CCompiler instance.
|
| 200 |
+
|
| 201 |
+
Mainly needed on Unix, so we can plug in the information that
|
| 202 |
+
varies across Unices and is stored in Python's Makefile.
|
| 203 |
+
"""
|
| 204 |
+
if compiler.compiler_type == "unix":
|
| 205 |
+
if sys.platform == "darwin":
|
| 206 |
+
# Perform first-time customization of compiler-related
|
| 207 |
+
# config vars on OS X now that we know we need a compiler.
|
| 208 |
+
# This is primarily to support Pythons from binary
|
| 209 |
+
# installers. The kind and paths to build tools on
|
| 210 |
+
# the user system may vary significantly from the system
|
| 211 |
+
# that Python itself was built on. Also the user OS
|
| 212 |
+
# version and build tools may not support the same set
|
| 213 |
+
# of CPU architectures for universal builds.
|
| 214 |
+
if not _config_vars.get('CUSTOMIZED_OSX_COMPILER'):
|
| 215 |
+
import _osx_support
|
| 216 |
+
_osx_support.customize_compiler(_config_vars)
|
| 217 |
+
_config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
|
| 218 |
+
|
| 219 |
+
(cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
|
| 220 |
+
get_config_vars('CC', 'CXX', 'CFLAGS',
|
| 221 |
+
'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')
|
| 222 |
+
|
| 223 |
+
if 'CC' in os.environ:
|
| 224 |
+
newcc = os.environ['CC']
|
| 225 |
+
if ('LDSHARED' not in os.environ
|
| 226 |
+
and ldshared.startswith(cc)):
|
| 227 |
+
# If CC is overridden, use that as the default
|
| 228 |
+
# command for LDSHARED as well
|
| 229 |
+
ldshared = newcc + ldshared[len(cc):]
|
| 230 |
+
cc = newcc
|
| 231 |
+
if 'CXX' in os.environ:
|
| 232 |
+
cxx = os.environ['CXX']
|
| 233 |
+
if 'LDSHARED' in os.environ:
|
| 234 |
+
ldshared = os.environ['LDSHARED']
|
| 235 |
+
if 'CPP' in os.environ:
|
| 236 |
+
cpp = os.environ['CPP']
|
| 237 |
+
else:
|
| 238 |
+
cpp = cc + " -E" # not always
|
| 239 |
+
if 'LDFLAGS' in os.environ:
|
| 240 |
+
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
|
| 241 |
+
if 'CFLAGS' in os.environ:
|
| 242 |
+
cflags = cflags + ' ' + os.environ['CFLAGS']
|
| 243 |
+
ldshared = ldshared + ' ' + os.environ['CFLAGS']
|
| 244 |
+
if 'CPPFLAGS' in os.environ:
|
| 245 |
+
cpp = cpp + ' ' + os.environ['CPPFLAGS']
|
| 246 |
+
cflags = cflags + ' ' + os.environ['CPPFLAGS']
|
| 247 |
+
ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
|
| 248 |
+
if 'AR' in os.environ:
|
| 249 |
+
ar = os.environ['AR']
|
| 250 |
+
if 'ARFLAGS' in os.environ:
|
| 251 |
+
archiver = ar + ' ' + os.environ['ARFLAGS']
|
| 252 |
+
else:
|
| 253 |
+
archiver = ar + ' ' + ar_flags
|
| 254 |
+
|
| 255 |
+
cc_cmd = cc + ' ' + cflags
|
| 256 |
+
compiler.set_executables(
|
| 257 |
+
preprocessor=cpp,
|
| 258 |
+
compiler=cc_cmd,
|
| 259 |
+
compiler_so=cc_cmd + ' ' + ccshared,
|
| 260 |
+
compiler_cxx=cxx,
|
| 261 |
+
linker_so=ldshared,
|
| 262 |
+
linker_exe=cc,
|
| 263 |
+
archiver=archiver)
|
| 264 |
+
|
| 265 |
+
if 'RANLIB' in os.environ and 'ranlib' in compiler.executables:
|
| 266 |
+
compiler.set_executables(ranlib=os.environ['RANLIB'])
|
| 267 |
+
|
| 268 |
+
compiler.shared_lib_extension = shlib_suffix
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def get_python_inc(plat_specific=0, prefix=None):
|
| 272 |
+
"""Return the directory containing installed Python header files.
|
| 273 |
+
|
| 274 |
+
If 'plat_specific' is false (the default), this is the path to the
|
| 275 |
+
non-platform-specific header files, i.e. Python.h and so on;
|
| 276 |
+
otherwise, this is the path to platform-specific header files
|
| 277 |
+
(namely pyconfig.h).
|
| 278 |
+
|
| 279 |
+
If 'prefix' is supplied, use it instead of sys.base_prefix or
|
| 280 |
+
sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
|
| 281 |
+
"""
|
| 282 |
+
if prefix is None:
|
| 283 |
+
prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
|
| 284 |
+
if os.name == "posix":
|
| 285 |
+
if python_build:
|
| 286 |
+
# Assume the executable is in the build directory. The
|
| 287 |
+
# pyconfig.h file should be in the same directory. Since
|
| 288 |
+
# the build directory may not be the source directory, we
|
| 289 |
+
# must use "srcdir" from the makefile to find the "Include"
|
| 290 |
+
# directory.
|
| 291 |
+
if plat_specific:
|
| 292 |
+
return _sys_home or project_base
|
| 293 |
+
else:
|
| 294 |
+
incdir = os.path.join(get_config_var('srcdir'), 'Include')
|
| 295 |
+
return os.path.normpath(incdir)
|
| 296 |
+
python_dir = 'python' + get_python_version() + build_flags
|
| 297 |
+
return os.path.join(prefix, "include", python_dir)
|
| 298 |
+
elif os.name == "nt":
|
| 299 |
+
if python_build:
|
| 300 |
+
# Include both the include and PC dir to ensure we can find
|
| 301 |
+
# pyconfig.h
|
| 302 |
+
return (os.path.join(prefix, "include") + os.path.pathsep +
|
| 303 |
+
os.path.join(prefix, "PC"))
|
| 304 |
+
return os.path.join(prefix, "include")
|
| 305 |
+
else:
|
| 306 |
+
raise DistutilsPlatformError(
|
| 307 |
+
"I don't know where Python installs its C header files "
|
| 308 |
+
"on platform '%s'" % os.name)
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
|
| 312 |
+
"""Return the directory containing the Python library (standard or
|
| 313 |
+
site additions).
|
| 314 |
+
|
| 315 |
+
If 'plat_specific' is true, return the directory containing
|
| 316 |
+
platform-specific modules, i.e. any module from a non-pure-Python
|
| 317 |
+
module distribution; otherwise, return the platform-shared library
|
| 318 |
+
directory. If 'standard_lib' is true, return the directory
|
| 319 |
+
containing standard Python library modules; otherwise, return the
|
| 320 |
+
directory for site-specific modules.
|
| 321 |
+
|
| 322 |
+
If 'prefix' is supplied, use it instead of sys.base_prefix or
|
| 323 |
+
sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
|
| 324 |
+
"""
|
| 325 |
+
if prefix is None:
|
| 326 |
+
if standard_lib:
|
| 327 |
+
prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
|
| 328 |
+
else:
|
| 329 |
+
prefix = plat_specific and EXEC_PREFIX or PREFIX
|
| 330 |
+
|
| 331 |
+
if os.name == "posix":
|
| 332 |
+
if plat_specific or standard_lib:
|
| 333 |
+
# Platform-specific modules (any module from a non-pure-Python
|
| 334 |
+
# module distribution) or standard Python library modules.
|
| 335 |
+
libdir = sys.platlibdir
|
| 336 |
+
else:
|
| 337 |
+
# Pure Python
|
| 338 |
+
libdir = "lib"
|
| 339 |
+
libpython = os.path.join(prefix, libdir,
|
| 340 |
+
"python" + get_python_version())
|
| 341 |
+
if standard_lib:
|
| 342 |
+
return libpython
|
| 343 |
+
else:
|
| 344 |
+
return os.path.join(libpython, "site-packages")
|
| 345 |
+
elif os.name == "nt":
|
| 346 |
+
if standard_lib:
|
| 347 |
+
return os.path.join(prefix, "Lib")
|
| 348 |
+
else:
|
| 349 |
+
return os.path.join(prefix, "Lib", "site-packages")
|
| 350 |
+
else:
|
| 351 |
+
raise DistutilsPlatformError(
|
| 352 |
+
"I don't know where Python installs its library "
|
| 353 |
+
"on platform '%s'" % os.name)
|
evalkit_llava/lib/python3.10/distutils/tests/Setup.sample
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Setup file from the pygame project
|
| 2 |
+
|
| 3 |
+
#--StartConfig
|
| 4 |
+
SDL = -I/usr/include/SDL -D_REENTRANT -lSDL
|
| 5 |
+
FONT = -lSDL_ttf
|
| 6 |
+
IMAGE = -lSDL_image
|
| 7 |
+
MIXER = -lSDL_mixer
|
| 8 |
+
SMPEG = -lsmpeg
|
| 9 |
+
PNG = -lpng
|
| 10 |
+
JPEG = -ljpeg
|
| 11 |
+
SCRAP = -lX11
|
| 12 |
+
PORTMIDI = -lportmidi
|
| 13 |
+
PORTTIME = -lporttime
|
| 14 |
+
#--EndConfig
|
| 15 |
+
|
| 16 |
+
#DEBUG = -C-W -C-Wall
|
| 17 |
+
DEBUG =
|
| 18 |
+
|
| 19 |
+
#the following modules are optional. you will want to compile
|
| 20 |
+
#everything you can, but you can ignore ones you don't have
|
| 21 |
+
#dependencies for, just comment them out
|
| 22 |
+
|
| 23 |
+
imageext src/imageext.c $(SDL) $(IMAGE) $(PNG) $(JPEG) $(DEBUG)
|
| 24 |
+
font src/font.c $(SDL) $(FONT) $(DEBUG)
|
| 25 |
+
mixer src/mixer.c $(SDL) $(MIXER) $(DEBUG)
|
| 26 |
+
mixer_music src/music.c $(SDL) $(MIXER) $(DEBUG)
|
| 27 |
+
_numericsurfarray src/_numericsurfarray.c $(SDL) $(DEBUG)
|
| 28 |
+
_numericsndarray src/_numericsndarray.c $(SDL) $(MIXER) $(DEBUG)
|
| 29 |
+
movie src/movie.c $(SDL) $(SMPEG) $(DEBUG)
|
| 30 |
+
scrap src/scrap.c $(SDL) $(SCRAP) $(DEBUG)
|
| 31 |
+
_camera src/_camera.c src/camera_v4l2.c src/camera_v4l.c $(SDL) $(DEBUG)
|
| 32 |
+
pypm src/pypm.c $(SDL) $(PORTMIDI) $(PORTTIME) $(DEBUG)
|
| 33 |
+
|
| 34 |
+
GFX = src/SDL_gfx/SDL_gfxPrimitives.c
|
| 35 |
+
#GFX = src/SDL_gfx/SDL_gfxBlitFunc.c src/SDL_gfx/SDL_gfxPrimitives.c
|
| 36 |
+
gfxdraw src/gfxdraw.c $(SDL) $(GFX) $(DEBUG)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
#these modules are required for pygame to run. they only require
|
| 41 |
+
#SDL as a dependency. these should not be altered
|
| 42 |
+
|
| 43 |
+
base src/base.c $(SDL) $(DEBUG)
|
| 44 |
+
cdrom src/cdrom.c $(SDL) $(DEBUG)
|
| 45 |
+
color src/color.c $(SDL) $(DEBUG)
|
| 46 |
+
constants src/constants.c $(SDL) $(DEBUG)
|
| 47 |
+
display src/display.c $(SDL) $(DEBUG)
|
| 48 |
+
event src/event.c $(SDL) $(DEBUG)
|
| 49 |
+
fastevent src/fastevent.c src/fastevents.c $(SDL) $(DEBUG)
|
| 50 |
+
key src/key.c $(SDL) $(DEBUG)
|
| 51 |
+
mouse src/mouse.c $(SDL) $(DEBUG)
|
| 52 |
+
rect src/rect.c $(SDL) $(DEBUG)
|
| 53 |
+
rwobject src/rwobject.c $(SDL) $(DEBUG)
|
| 54 |
+
surface src/surface.c src/alphablit.c src/surface_fill.c $(SDL) $(DEBUG)
|
| 55 |
+
surflock src/surflock.c $(SDL) $(DEBUG)
|
| 56 |
+
time src/time.c $(SDL) $(DEBUG)
|
| 57 |
+
joystick src/joystick.c $(SDL) $(DEBUG)
|
| 58 |
+
draw src/draw.c $(SDL) $(DEBUG)
|
| 59 |
+
image src/image.c $(SDL) $(DEBUG)
|
| 60 |
+
overlay src/overlay.c $(SDL) $(DEBUG)
|
| 61 |
+
transform src/transform.c src/rotozoom.c src/scale2x.c src/scale_mmx.c $(SDL) $(DEBUG)
|
| 62 |
+
mask src/mask.c src/bitmask.c $(SDL) $(DEBUG)
|
| 63 |
+
bufferproxy src/bufferproxy.c $(SDL) $(DEBUG)
|
| 64 |
+
pixelarray src/pixelarray.c $(SDL) $(DEBUG)
|
| 65 |
+
_arraysurfarray src/_arraysurfarray.c $(SDL) $(DEBUG)
|
| 66 |
+
|
| 67 |
+
|
evalkit_llava/lib/python3.10/distutils/tests/__init__.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Test suite for distutils.
|
| 2 |
+
|
| 3 |
+
This test suite consists of a collection of test modules in the
|
| 4 |
+
distutils.tests package. Each test module has a name starting with
|
| 5 |
+
'test' and contains a function test_suite(). The function is expected
|
| 6 |
+
to return an initialized unittest.TestSuite instance.
|
| 7 |
+
|
| 8 |
+
Tests for the command classes in the distutils.command package are
|
| 9 |
+
included in distutils.tests as well, instead of using a separate
|
| 10 |
+
distutils.command.tests package, since command identification is done
|
| 11 |
+
by import rather than matching pre-defined names.
|
| 12 |
+
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
import sys
|
| 17 |
+
import unittest
|
| 18 |
+
from test.support import run_unittest
|
| 19 |
+
from test.support.warnings_helper import save_restore_warnings_filters
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
here = os.path.dirname(__file__) or os.curdir
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def test_suite():
|
| 26 |
+
suite = unittest.TestSuite()
|
| 27 |
+
for fn in os.listdir(here):
|
| 28 |
+
if fn.startswith("test") and fn.endswith(".py"):
|
| 29 |
+
modname = "distutils.tests." + fn[:-3]
|
| 30 |
+
# bpo-40055: Save/restore warnings filters to leave them unchanged.
|
| 31 |
+
# Importing tests imports docutils which imports pkg_resources
|
| 32 |
+
# which adds a warnings filter.
|
| 33 |
+
with save_restore_warnings_filters():
|
| 34 |
+
__import__(modname)
|
| 35 |
+
module = sys.modules[modname]
|
| 36 |
+
suite.addTest(module.test_suite())
|
| 37 |
+
return suite
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
if __name__ == "__main__":
|
| 41 |
+
run_unittest(test_suite())
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_archive_util.cpython-310.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_bdist_dumb.cpython-310.pyc
ADDED
|
Binary file (3.12 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_bdist_rpm.cpython-310.pyc
ADDED
|
Binary file (3.62 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_build.cpython-310.pyc
ADDED
|
Binary file (1.64 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_build_ext.cpython-310.pyc
ADDED
|
Binary file (14 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_build_py.cpython-310.pyc
ADDED
|
Binary file (5.02 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_build_scripts.cpython-310.pyc
ADDED
|
Binary file (3.73 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_cmd.cpython-310.pyc
ADDED
|
Binary file (4.32 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_config.cpython-310.pyc
ADDED
|
Binary file (4.37 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_core.cpython-310.pyc
ADDED
|
Binary file (4.47 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_cygwinccompiler.cpython-310.pyc
ADDED
|
Binary file (4.74 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_dep_util.cpython-310.pyc
ADDED
|
Binary file (2.55 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_dir_util.cpython-310.pyc
ADDED
|
Binary file (4.92 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_extension.cpython-310.pyc
ADDED
|
Binary file (2.73 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_file_util.cpython-310.pyc
ADDED
|
Binary file (4.93 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_install_data.cpython-310.pyc
ADDED
|
Binary file (1.95 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_install_headers.cpython-310.pyc
ADDED
|
Binary file (1.38 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_msvccompiler.cpython-310.pyc
ADDED
|
Binary file (2.81 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_sdist.cpython-310.pyc
ADDED
|
Binary file (13 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_sysconfig.cpython-310.pyc
ADDED
|
Binary file (8.84 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/__pycache__/test_upload.cpython-310.pyc
ADDED
|
Binary file (7.1 kB). View file
|
|
|
evalkit_llava/lib/python3.10/distutils/tests/test_archive_util.py
ADDED
|
@@ -0,0 +1,396 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
"""Tests for distutils.archive_util."""
|
| 3 |
+
import unittest
|
| 4 |
+
import os
|
| 5 |
+
import sys
|
| 6 |
+
import tarfile
|
| 7 |
+
from os.path import splitdrive
|
| 8 |
+
import warnings
|
| 9 |
+
|
| 10 |
+
from distutils import archive_util
|
| 11 |
+
from distutils.archive_util import (check_archive_formats, make_tarball,
|
| 12 |
+
make_zipfile, make_archive,
|
| 13 |
+
ARCHIVE_FORMATS)
|
| 14 |
+
from distutils.spawn import find_executable, spawn
|
| 15 |
+
from distutils.tests import support
|
| 16 |
+
from test.support import run_unittest, patch
|
| 17 |
+
from test.support.os_helper import change_cwd
|
| 18 |
+
from test.support.warnings_helper import check_warnings
|
| 19 |
+
|
| 20 |
+
try:
|
| 21 |
+
import grp
|
| 22 |
+
import pwd
|
| 23 |
+
UID_GID_SUPPORT = True
|
| 24 |
+
except ImportError:
|
| 25 |
+
UID_GID_SUPPORT = False
|
| 26 |
+
|
| 27 |
+
try:
|
| 28 |
+
import zipfile
|
| 29 |
+
ZIP_SUPPORT = True
|
| 30 |
+
except ImportError:
|
| 31 |
+
ZIP_SUPPORT = find_executable('zip')
|
| 32 |
+
|
| 33 |
+
try:
|
| 34 |
+
import zlib
|
| 35 |
+
ZLIB_SUPPORT = True
|
| 36 |
+
except ImportError:
|
| 37 |
+
ZLIB_SUPPORT = False
|
| 38 |
+
|
| 39 |
+
try:
|
| 40 |
+
import bz2
|
| 41 |
+
except ImportError:
|
| 42 |
+
bz2 = None
|
| 43 |
+
|
| 44 |
+
try:
|
| 45 |
+
import lzma
|
| 46 |
+
except ImportError:
|
| 47 |
+
lzma = None
|
| 48 |
+
|
| 49 |
+
def can_fs_encode(filename):
|
| 50 |
+
"""
|
| 51 |
+
Return True if the filename can be saved in the file system.
|
| 52 |
+
"""
|
| 53 |
+
if os.path.supports_unicode_filenames:
|
| 54 |
+
return True
|
| 55 |
+
try:
|
| 56 |
+
filename.encode(sys.getfilesystemencoding())
|
| 57 |
+
except UnicodeEncodeError:
|
| 58 |
+
return False
|
| 59 |
+
return True
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class ArchiveUtilTestCase(support.TempdirManager,
|
| 63 |
+
support.LoggingSilencer,
|
| 64 |
+
unittest.TestCase):
|
| 65 |
+
|
| 66 |
+
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
|
| 67 |
+
def test_make_tarball(self, name='archive'):
|
| 68 |
+
# creating something to tar
|
| 69 |
+
tmpdir = self._create_files()
|
| 70 |
+
self._make_tarball(tmpdir, name, '.tar.gz')
|
| 71 |
+
# trying an uncompressed one
|
| 72 |
+
self._make_tarball(tmpdir, name, '.tar', compress=None)
|
| 73 |
+
|
| 74 |
+
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
|
| 75 |
+
def test_make_tarball_gzip(self):
|
| 76 |
+
tmpdir = self._create_files()
|
| 77 |
+
self._make_tarball(tmpdir, 'archive', '.tar.gz', compress='gzip')
|
| 78 |
+
|
| 79 |
+
@unittest.skipUnless(bz2, 'Need bz2 support to run')
|
| 80 |
+
def test_make_tarball_bzip2(self):
|
| 81 |
+
tmpdir = self._create_files()
|
| 82 |
+
self._make_tarball(tmpdir, 'archive', '.tar.bz2', compress='bzip2')
|
| 83 |
+
|
| 84 |
+
@unittest.skipUnless(lzma, 'Need lzma support to run')
|
| 85 |
+
def test_make_tarball_xz(self):
|
| 86 |
+
tmpdir = self._create_files()
|
| 87 |
+
self._make_tarball(tmpdir, 'archive', '.tar.xz', compress='xz')
|
| 88 |
+
|
| 89 |
+
@unittest.skipUnless(can_fs_encode('årchiv'),
|
| 90 |
+
'File system cannot handle this filename')
|
| 91 |
+
def test_make_tarball_latin1(self):
|
| 92 |
+
"""
|
| 93 |
+
Mirror test_make_tarball, except filename contains latin characters.
|
| 94 |
+
"""
|
| 95 |
+
self.test_make_tarball('årchiv') # note this isn't a real word
|
| 96 |
+
|
| 97 |
+
@unittest.skipUnless(can_fs_encode('のアーカイブ'),
|
| 98 |
+
'File system cannot handle this filename')
|
| 99 |
+
def test_make_tarball_extended(self):
|
| 100 |
+
"""
|
| 101 |
+
Mirror test_make_tarball, except filename contains extended
|
| 102 |
+
characters outside the latin charset.
|
| 103 |
+
"""
|
| 104 |
+
self.test_make_tarball('のアーカイブ') # japanese for archive
|
| 105 |
+
|
| 106 |
+
def _make_tarball(self, tmpdir, target_name, suffix, **kwargs):
|
| 107 |
+
tmpdir2 = self.mkdtemp()
|
| 108 |
+
unittest.skipUnless(splitdrive(tmpdir)[0] == splitdrive(tmpdir2)[0],
|
| 109 |
+
"source and target should be on same drive")
|
| 110 |
+
|
| 111 |
+
base_name = os.path.join(tmpdir2, target_name)
|
| 112 |
+
|
| 113 |
+
# working with relative paths to avoid tar warnings
|
| 114 |
+
with change_cwd(tmpdir):
|
| 115 |
+
make_tarball(splitdrive(base_name)[1], 'dist', **kwargs)
|
| 116 |
+
|
| 117 |
+
# check if the compressed tarball was created
|
| 118 |
+
tarball = base_name + suffix
|
| 119 |
+
self.assertTrue(os.path.exists(tarball))
|
| 120 |
+
self.assertEqual(self._tarinfo(tarball), self._created_files)
|
| 121 |
+
|
| 122 |
+
def _tarinfo(self, path):
|
| 123 |
+
tar = tarfile.open(path)
|
| 124 |
+
try:
|
| 125 |
+
names = tar.getnames()
|
| 126 |
+
names.sort()
|
| 127 |
+
return names
|
| 128 |
+
finally:
|
| 129 |
+
tar.close()
|
| 130 |
+
|
| 131 |
+
_zip_created_files = ['dist/', 'dist/file1', 'dist/file2',
|
| 132 |
+
'dist/sub/', 'dist/sub/file3', 'dist/sub2/']
|
| 133 |
+
_created_files = [p.rstrip('/') for p in _zip_created_files]
|
| 134 |
+
|
| 135 |
+
def _create_files(self):
|
| 136 |
+
# creating something to tar
|
| 137 |
+
tmpdir = self.mkdtemp()
|
| 138 |
+
dist = os.path.join(tmpdir, 'dist')
|
| 139 |
+
os.mkdir(dist)
|
| 140 |
+
self.write_file([dist, 'file1'], 'xxx')
|
| 141 |
+
self.write_file([dist, 'file2'], 'xxx')
|
| 142 |
+
os.mkdir(os.path.join(dist, 'sub'))
|
| 143 |
+
self.write_file([dist, 'sub', 'file3'], 'xxx')
|
| 144 |
+
os.mkdir(os.path.join(dist, 'sub2'))
|
| 145 |
+
return tmpdir
|
| 146 |
+
|
| 147 |
+
@unittest.skipUnless(find_executable('tar') and find_executable('gzip')
|
| 148 |
+
and ZLIB_SUPPORT,
|
| 149 |
+
'Need the tar, gzip and zlib command to run')
|
| 150 |
+
def test_tarfile_vs_tar(self):
|
| 151 |
+
tmpdir = self._create_files()
|
| 152 |
+
tmpdir2 = self.mkdtemp()
|
| 153 |
+
base_name = os.path.join(tmpdir2, 'archive')
|
| 154 |
+
old_dir = os.getcwd()
|
| 155 |
+
os.chdir(tmpdir)
|
| 156 |
+
try:
|
| 157 |
+
make_tarball(base_name, 'dist')
|
| 158 |
+
finally:
|
| 159 |
+
os.chdir(old_dir)
|
| 160 |
+
|
| 161 |
+
# check if the compressed tarball was created
|
| 162 |
+
tarball = base_name + '.tar.gz'
|
| 163 |
+
self.assertTrue(os.path.exists(tarball))
|
| 164 |
+
|
| 165 |
+
# now create another tarball using `tar`
|
| 166 |
+
tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
|
| 167 |
+
tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
|
| 168 |
+
gzip_cmd = ['gzip', '-f', '-9', 'archive2.tar']
|
| 169 |
+
old_dir = os.getcwd()
|
| 170 |
+
os.chdir(tmpdir)
|
| 171 |
+
try:
|
| 172 |
+
spawn(tar_cmd)
|
| 173 |
+
spawn(gzip_cmd)
|
| 174 |
+
finally:
|
| 175 |
+
os.chdir(old_dir)
|
| 176 |
+
|
| 177 |
+
self.assertTrue(os.path.exists(tarball2))
|
| 178 |
+
# let's compare both tarballs
|
| 179 |
+
self.assertEqual(self._tarinfo(tarball), self._created_files)
|
| 180 |
+
self.assertEqual(self._tarinfo(tarball2), self._created_files)
|
| 181 |
+
|
| 182 |
+
# trying an uncompressed one
|
| 183 |
+
base_name = os.path.join(tmpdir2, 'archive')
|
| 184 |
+
old_dir = os.getcwd()
|
| 185 |
+
os.chdir(tmpdir)
|
| 186 |
+
try:
|
| 187 |
+
make_tarball(base_name, 'dist', compress=None)
|
| 188 |
+
finally:
|
| 189 |
+
os.chdir(old_dir)
|
| 190 |
+
tarball = base_name + '.tar'
|
| 191 |
+
self.assertTrue(os.path.exists(tarball))
|
| 192 |
+
|
| 193 |
+
# now for a dry_run
|
| 194 |
+
base_name = os.path.join(tmpdir2, 'archive')
|
| 195 |
+
old_dir = os.getcwd()
|
| 196 |
+
os.chdir(tmpdir)
|
| 197 |
+
try:
|
| 198 |
+
make_tarball(base_name, 'dist', compress=None, dry_run=True)
|
| 199 |
+
finally:
|
| 200 |
+
os.chdir(old_dir)
|
| 201 |
+
tarball = base_name + '.tar'
|
| 202 |
+
self.assertTrue(os.path.exists(tarball))
|
| 203 |
+
|
| 204 |
+
@unittest.skipUnless(find_executable('compress'),
|
| 205 |
+
'The compress program is required')
|
| 206 |
+
def test_compress_deprecated(self):
|
| 207 |
+
tmpdir = self._create_files()
|
| 208 |
+
base_name = os.path.join(self.mkdtemp(), 'archive')
|
| 209 |
+
|
| 210 |
+
# using compress and testing the PendingDeprecationWarning
|
| 211 |
+
old_dir = os.getcwd()
|
| 212 |
+
os.chdir(tmpdir)
|
| 213 |
+
try:
|
| 214 |
+
with check_warnings() as w:
|
| 215 |
+
warnings.simplefilter("always")
|
| 216 |
+
make_tarball(base_name, 'dist', compress='compress')
|
| 217 |
+
finally:
|
| 218 |
+
os.chdir(old_dir)
|
| 219 |
+
tarball = base_name + '.tar.Z'
|
| 220 |
+
self.assertTrue(os.path.exists(tarball))
|
| 221 |
+
self.assertEqual(len(w.warnings), 1)
|
| 222 |
+
|
| 223 |
+
# same test with dry_run
|
| 224 |
+
os.remove(tarball)
|
| 225 |
+
old_dir = os.getcwd()
|
| 226 |
+
os.chdir(tmpdir)
|
| 227 |
+
try:
|
| 228 |
+
with check_warnings() as w:
|
| 229 |
+
warnings.simplefilter("always")
|
| 230 |
+
make_tarball(base_name, 'dist', compress='compress',
|
| 231 |
+
dry_run=True)
|
| 232 |
+
finally:
|
| 233 |
+
os.chdir(old_dir)
|
| 234 |
+
self.assertFalse(os.path.exists(tarball))
|
| 235 |
+
self.assertEqual(len(w.warnings), 1)
|
| 236 |
+
|
| 237 |
+
@unittest.skipUnless(ZIP_SUPPORT and ZLIB_SUPPORT,
|
| 238 |
+
'Need zip and zlib support to run')
|
| 239 |
+
def test_make_zipfile(self):
|
| 240 |
+
# creating something to tar
|
| 241 |
+
tmpdir = self._create_files()
|
| 242 |
+
base_name = os.path.join(self.mkdtemp(), 'archive')
|
| 243 |
+
with change_cwd(tmpdir):
|
| 244 |
+
make_zipfile(base_name, 'dist')
|
| 245 |
+
|
| 246 |
+
# check if the compressed tarball was created
|
| 247 |
+
tarball = base_name + '.zip'
|
| 248 |
+
self.assertTrue(os.path.exists(tarball))
|
| 249 |
+
with zipfile.ZipFile(tarball) as zf:
|
| 250 |
+
self.assertEqual(sorted(zf.namelist()), self._zip_created_files)
|
| 251 |
+
|
| 252 |
+
    @unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
    def test_make_zipfile_no_zlib(self):
        """Without zlib, make_zipfile() must fall back to ZIP_STORED.

        Simulates a missing zlib by patching it to None inside
        archive_util's zipfile reference, then records the compression
        argument actually passed to ZipFile.
        """
        patch(self, archive_util.zipfile, 'zlib', None)  # force zlib ImportError

        called = []
        zipfile_class = zipfile.ZipFile
        # wrapper that records (args, kwargs) whenever ZIP_STORED is requested
        def fake_zipfile(*a, **kw):
            if kw.get('compression', None) == zipfile.ZIP_STORED:
                called.append((a, kw))
            return zipfile_class(*a, **kw)

        patch(self, archive_util.zipfile, 'ZipFile', fake_zipfile)

        # create something to tar and compress
        tmpdir = self._create_files()
        base_name = os.path.join(self.mkdtemp(), 'archive')
        with change_cwd(tmpdir):
            make_zipfile(base_name, 'dist')

        tarball = base_name + '.zip'
        # exactly one ZipFile was opened, in "w" mode, with ZIP_STORED
        self.assertEqual(called,
                         [((tarball, "w"), {'compression': zipfile.ZIP_STORED})])
        self.assertTrue(os.path.exists(tarball))
        # the uncompressed archive still contains the full fixture tree
        with zipfile.ZipFile(tarball) as zf:
            self.assertEqual(sorted(zf.namelist()), self._zip_created_files)
|
| 277 |
+
|
| 278 |
+
def test_check_archive_formats(self):
|
| 279 |
+
self.assertEqual(check_archive_formats(['gztar', 'xxx', 'zip']),
|
| 280 |
+
'xxx')
|
| 281 |
+
self.assertIsNone(check_archive_formats(['gztar', 'bztar', 'xztar',
|
| 282 |
+
'ztar', 'tar', 'zip']))
|
| 283 |
+
|
| 284 |
+
def test_make_archive(self):
|
| 285 |
+
tmpdir = self.mkdtemp()
|
| 286 |
+
base_name = os.path.join(tmpdir, 'archive')
|
| 287 |
+
self.assertRaises(ValueError, make_archive, base_name, 'xxx')
|
| 288 |
+
|
| 289 |
+
def test_make_archive_cwd(self):
|
| 290 |
+
current_dir = os.getcwd()
|
| 291 |
+
def _breaks(*args, **kw):
|
| 292 |
+
raise RuntimeError()
|
| 293 |
+
ARCHIVE_FORMATS['xxx'] = (_breaks, [], 'xxx file')
|
| 294 |
+
try:
|
| 295 |
+
try:
|
| 296 |
+
make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
|
| 297 |
+
except:
|
| 298 |
+
pass
|
| 299 |
+
self.assertEqual(os.getcwd(), current_dir)
|
| 300 |
+
finally:
|
| 301 |
+
del ARCHIVE_FORMATS['xxx']
|
| 302 |
+
|
| 303 |
+
def test_make_archive_tar(self):
|
| 304 |
+
base_dir = self._create_files()
|
| 305 |
+
base_name = os.path.join(self.mkdtemp() , 'archive')
|
| 306 |
+
res = make_archive(base_name, 'tar', base_dir, 'dist')
|
| 307 |
+
self.assertTrue(os.path.exists(res))
|
| 308 |
+
self.assertEqual(os.path.basename(res), 'archive.tar')
|
| 309 |
+
self.assertEqual(self._tarinfo(res), self._created_files)
|
| 310 |
+
|
| 311 |
+
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
|
| 312 |
+
def test_make_archive_gztar(self):
|
| 313 |
+
base_dir = self._create_files()
|
| 314 |
+
base_name = os.path.join(self.mkdtemp() , 'archive')
|
| 315 |
+
res = make_archive(base_name, 'gztar', base_dir, 'dist')
|
| 316 |
+
self.assertTrue(os.path.exists(res))
|
| 317 |
+
self.assertEqual(os.path.basename(res), 'archive.tar.gz')
|
| 318 |
+
self.assertEqual(self._tarinfo(res), self._created_files)
|
| 319 |
+
|
| 320 |
+
@unittest.skipUnless(bz2, 'Need bz2 support to run')
|
| 321 |
+
def test_make_archive_bztar(self):
|
| 322 |
+
base_dir = self._create_files()
|
| 323 |
+
base_name = os.path.join(self.mkdtemp() , 'archive')
|
| 324 |
+
res = make_archive(base_name, 'bztar', base_dir, 'dist')
|
| 325 |
+
self.assertTrue(os.path.exists(res))
|
| 326 |
+
self.assertEqual(os.path.basename(res), 'archive.tar.bz2')
|
| 327 |
+
self.assertEqual(self._tarinfo(res), self._created_files)
|
| 328 |
+
|
| 329 |
+
@unittest.skipUnless(lzma, 'Need xz support to run')
|
| 330 |
+
def test_make_archive_xztar(self):
|
| 331 |
+
base_dir = self._create_files()
|
| 332 |
+
base_name = os.path.join(self.mkdtemp() , 'archive')
|
| 333 |
+
res = make_archive(base_name, 'xztar', base_dir, 'dist')
|
| 334 |
+
self.assertTrue(os.path.exists(res))
|
| 335 |
+
self.assertEqual(os.path.basename(res), 'archive.tar.xz')
|
| 336 |
+
self.assertEqual(self._tarinfo(res), self._created_files)
|
| 337 |
+
|
| 338 |
+
    def test_make_archive_owner_group(self):
        """owner/group arguments are accepted for zip and tar archives.

        Exercises make_archive() with real root names when uid/gid support
        exists, with the plain string 'root' otherwise, and with clearly
        bogus names — each call must still produce an archive.
        """
        # testing make_archive with owner and group, with various combinations
        # this works even if there's not gid/uid support
        if UID_GID_SUPPORT:
            group = grp.getgrgid(0)[0]
            owner = pwd.getpwuid(0)[0]
        else:
            group = owner = 'root'

        base_dir = self._create_files()
        root_dir = self.mkdtemp()
        base_name = os.path.join(self.mkdtemp() , 'archive')
        # zip format: owner/group keywords must be accepted
        res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner,
                           group=group)
        self.assertTrue(os.path.exists(res))

        # zip format without owner/group
        res = make_archive(base_name, 'zip', root_dir, base_dir)
        self.assertTrue(os.path.exists(res))

        # tar format with owner/group
        res = make_archive(base_name, 'tar', root_dir, base_dir,
                           owner=owner, group=group)
        self.assertTrue(os.path.exists(res))

        # tar format with nonexistent owner/group names must not fail
        res = make_archive(base_name, 'tar', root_dir, base_dir,
                           owner='kjhkjhkjg', group='oihohoh')
        self.assertTrue(os.path.exists(res))
|
| 364 |
+
|
| 365 |
+
@unittest.skipUnless(ZLIB_SUPPORT, "Requires zlib")
|
| 366 |
+
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
|
| 367 |
+
def test_tarfile_root_owner(self):
|
| 368 |
+
tmpdir = self._create_files()
|
| 369 |
+
base_name = os.path.join(self.mkdtemp(), 'archive')
|
| 370 |
+
old_dir = os.getcwd()
|
| 371 |
+
os.chdir(tmpdir)
|
| 372 |
+
group = grp.getgrgid(0)[0]
|
| 373 |
+
owner = pwd.getpwuid(0)[0]
|
| 374 |
+
try:
|
| 375 |
+
archive_name = make_tarball(base_name, 'dist', compress=None,
|
| 376 |
+
owner=owner, group=group)
|
| 377 |
+
finally:
|
| 378 |
+
os.chdir(old_dir)
|
| 379 |
+
|
| 380 |
+
# check if the compressed tarball was created
|
| 381 |
+
self.assertTrue(os.path.exists(archive_name))
|
| 382 |
+
|
| 383 |
+
# now checks the rights
|
| 384 |
+
archive = tarfile.open(archive_name)
|
| 385 |
+
try:
|
| 386 |
+
for member in archive.getmembers():
|
| 387 |
+
self.assertEqual(member.uid, 0)
|
| 388 |
+
self.assertEqual(member.gid, 0)
|
| 389 |
+
finally:
|
| 390 |
+
archive.close()
|
| 391 |
+
|
| 392 |
+
def test_suite():
    """Return the suite of archive_util tests (hook used by test.regrtest)."""
    return unittest.makeSuite(ArchiveUtilTestCase)
|
| 394 |
+
|
| 395 |
+
if __name__ == "__main__":
    # Allow running this test module directly.
    run_unittest(test_suite())
|
evalkit_llava/lib/python3.10/distutils/tests/test_bdist.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.command.bdist."""
|
| 2 |
+
import os
|
| 3 |
+
import unittest
|
| 4 |
+
from test.support import run_unittest
|
| 5 |
+
|
| 6 |
+
import warnings
|
| 7 |
+
with warnings.catch_warnings():
|
| 8 |
+
warnings.simplefilter('ignore', DeprecationWarning)
|
| 9 |
+
from distutils.command.bdist import bdist
|
| 10 |
+
from distutils.tests import support
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class BuildTestCase(support.TempdirManager,
                    unittest.TestCase):
    """Tests for the 'bdist' distutils command."""

    def test_formats(self):
        """'formats' can be set explicitly and the full list is exposed."""
        # let's create a command and make sure
        # we can set the format
        dist = self.create_dist()[1]
        cmd = bdist(dist)
        cmd.formats = ['msi']
        cmd.ensure_finalized()
        self.assertEqual(cmd.formats, ['msi'])

        # what formats does bdist offer?
        formats = ['bztar', 'gztar', 'msi', 'rpm', 'tar',
                   'xztar', 'zip', 'ztar']
        found = sorted(cmd.format_command)
        self.assertEqual(found, formats)

    def test_skip_build(self):
        """--skip-build must propagate from bdist to its sub-commands."""
        # bug #10946: bdist --skip-build should trickle down to subcommands
        dist = self.create_dist()[1]
        cmd = bdist(dist)
        cmd.skip_build = 1
        cmd.ensure_finalized()
        dist.command_obj['bdist'] = cmd

        names = ['bdist_dumb']  # bdist_rpm does not support --skip-build
        if os.name == 'nt':
            names.append('bdist_msi')

        for name in names:
            subcmd = cmd.get_finalized_command(name)
            if getattr(subcmd, '_unsupported', False):
                # command is not supported on this build
                continue
            # each sub-command must have inherited the flag
            self.assertTrue(subcmd.skip_build,
                            '%s should take --skip-build from bdist' % name)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def test_suite():
    """Return the suite of bdist tests (hook used by test.regrtest)."""
    return unittest.makeSuite(BuildTestCase)
|
| 54 |
+
|
| 55 |
+
if __name__ == '__main__':
    # Allow running this test module directly.
    run_unittest(test_suite())
|
evalkit_llava/lib/python3.10/distutils/tests/test_build_ext.py
ADDED
|
@@ -0,0 +1,553 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import os
|
| 3 |
+
from io import StringIO
|
| 4 |
+
import textwrap
|
| 5 |
+
|
| 6 |
+
from distutils.core import Distribution
|
| 7 |
+
from distutils.command.build_ext import build_ext
|
| 8 |
+
from distutils import sysconfig
|
| 9 |
+
from distutils.tests.support import (TempdirManager, LoggingSilencer,
|
| 10 |
+
copy_xxmodule_c, fixup_build_ext)
|
| 11 |
+
from distutils.extension import Extension
|
| 12 |
+
from distutils.errors import (
|
| 13 |
+
CompileError, DistutilsPlatformError, DistutilsSetupError,
|
| 14 |
+
UnknownFileError)
|
| 15 |
+
|
| 16 |
+
import unittest
|
| 17 |
+
from test import support
|
| 18 |
+
from test.support import os_helper
|
| 19 |
+
from test.support.script_helper import assert_python_ok
|
| 20 |
+
|
| 21 |
+
# http://bugs.python.org/issue4373
|
| 22 |
+
# Don't load the xx module more than once.
|
| 23 |
+
ALREADY_TESTED = False
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class BuildExtTestCase(TempdirManager,
|
| 27 |
+
LoggingSilencer,
|
| 28 |
+
unittest.TestCase):
|
| 29 |
+
    def setUp(self):
        """Build an isolated environment: temp dir, fake USER_BASE,
        saved sysconfig vars, and a temporary working directory."""
        # Create a simple test environment
        super(BuildExtTestCase, self).setUp()
        self.tmp_dir = self.mkdtemp()
        import site
        # redirect the user site-packages base to a throwaway directory
        self.old_user_base = site.USER_BASE
        site.USER_BASE = self.mkdtemp()
        from distutils.command import build_ext
        build_ext.USER_BASE = site.USER_BASE
        # snapshot sysconfig's variables so tearDown() can restore them
        self.old_config_vars = dict(sysconfig._config_vars)

        # bpo-30132: On Windows, a .pdb file may be created in the current
        # working directory. Create a temporary working directory to cleanup
        # everything at the end of the test.
        change_cwd = os_helper.change_cwd(self.tmp_dir)
        change_cwd.__enter__()
        self.addCleanup(change_cwd.__exit__, None, None, None)
|
| 46 |
+
|
| 47 |
+
    def tearDown(self):
        """Undo the site / build_ext / sysconfig patches made in setUp()."""
        import site
        site.USER_BASE = self.old_user_base
        from distutils.command import build_ext
        build_ext.USER_BASE = self.old_user_base
        # restore the sysconfig variables captured in setUp()
        sysconfig._config_vars.clear()
        sysconfig._config_vars.update(self.old_config_vars)
        super(BuildExtTestCase, self).tearDown()
|
| 55 |
+
|
| 56 |
+
    def build_ext(self, *args, **kwargs):
        """Construct the build_ext command; override point for subclasses."""
        return build_ext(*args, **kwargs)
|
| 58 |
+
|
| 59 |
+
    def test_build_ext(self):
        """Compile xxmodule.c and exercise the resulting module in a
        subprocess (only once per run, see ALREADY_TESTED)."""
        cmd = support.missing_compiler_executable()
        if cmd is not None:
            self.skipTest('The %r command is not found' % cmd)
        global ALREADY_TESTED
        copy_xxmodule_c(self.tmp_dir)
        xx_c = os.path.join(self.tmp_dir, 'xxmodule.c')
        xx_ext = Extension('xx', [xx_c])
        dist = Distribution({'name': 'xx', 'ext_modules': [xx_ext]})
        dist.package_dir = self.tmp_dir
        cmd = self.build_ext(dist)
        fixup_build_ext(cmd)
        cmd.build_lib = self.tmp_dir
        cmd.build_temp = self.tmp_dir

        old_stdout = sys.stdout
        if not support.verbose:
            # silence compiler output
            sys.stdout = StringIO()
        try:
            cmd.ensure_finalized()
            cmd.run()
        finally:
            sys.stdout = old_stdout

        # http://bugs.python.org/issue4373: never import xx twice in the
        # same process — run the checks in a fresh interpreter instead.
        if ALREADY_TESTED:
            self.skipTest('Already tested in %s' % ALREADY_TESTED)
        else:
            ALREADY_TESTED = type(self).__name__

        # script executed in a subprocess so 'import xx' starts clean
        code = textwrap.dedent(f"""
            tmp_dir = {self.tmp_dir!r}

            import sys
            import unittest
            from test import support

            sys.path.insert(0, tmp_dir)
            import xx

            class Tests(unittest.TestCase):
                def test_xx(self):
                    for attr in ('error', 'foo', 'new', 'roj'):
                        self.assertTrue(hasattr(xx, attr))

                    self.assertEqual(xx.foo(2, 5), 7)
                    self.assertEqual(xx.foo(13,15), 28)
                    self.assertEqual(xx.new().demo(), None)
                    if support.HAVE_DOCSTRINGS:
                        doc = 'This is a template module just for instruction.'
                        self.assertEqual(xx.__doc__, doc)
                    self.assertIsInstance(xx.Null(), xx.Null)
                    self.assertIsInstance(xx.Str(), xx.Str)


            unittest.main()
        """)
        assert_python_ok('-c', code)
|
| 117 |
+
|
| 118 |
+
    def test_solaris_enable_shared(self):
        """Faking Solaris with Py_ENABLE_SHARED set, finalize_options()
        must populate library_dirs."""
        dist = Distribution({'name': 'xx'})
        cmd = self.build_ext(dist)
        old = sys.platform

        sys.platform = 'sunos'  # fooling finalize_options
        from distutils.sysconfig import _config_vars
        old_var = _config_vars.get('Py_ENABLE_SHARED')
        _config_vars['Py_ENABLE_SHARED'] = 1
        try:
            cmd.ensure_finalized()
        finally:
            # restore both patched globals whatever happens
            sys.platform = old
            if old_var is None:
                del _config_vars['Py_ENABLE_SHARED']
            else:
                _config_vars['Py_ENABLE_SHARED'] = old_var

        # make sure we get some library dirs under solaris
        self.assertGreater(len(cmd.library_dirs), 0)
|
| 138 |
+
|
| 139 |
+
def test_user_site(self):
|
| 140 |
+
import site
|
| 141 |
+
dist = Distribution({'name': 'xx'})
|
| 142 |
+
cmd = self.build_ext(dist)
|
| 143 |
+
|
| 144 |
+
# making sure the user option is there
|
| 145 |
+
options = [name for name, short, lable in
|
| 146 |
+
cmd.user_options]
|
| 147 |
+
self.assertIn('user', options)
|
| 148 |
+
|
| 149 |
+
# setting a value
|
| 150 |
+
cmd.user = 1
|
| 151 |
+
|
| 152 |
+
# setting user based lib and include
|
| 153 |
+
lib = os.path.join(site.USER_BASE, 'lib')
|
| 154 |
+
incl = os.path.join(site.USER_BASE, 'include')
|
| 155 |
+
os.mkdir(lib)
|
| 156 |
+
os.mkdir(incl)
|
| 157 |
+
|
| 158 |
+
# let's run finalize
|
| 159 |
+
cmd.ensure_finalized()
|
| 160 |
+
|
| 161 |
+
# see if include_dirs and library_dirs
|
| 162 |
+
# were set
|
| 163 |
+
self.assertIn(lib, cmd.library_dirs)
|
| 164 |
+
self.assertIn(lib, cmd.rpath)
|
| 165 |
+
self.assertIn(incl, cmd.include_dirs)
|
| 166 |
+
|
| 167 |
+
    def test_optional_extension(self):
        """A failing extension build aborts unless marked optional=True."""

        # this extension will fail, but let's ignore this failure
        # with the optional argument.
        modules = [Extension('foo', ['xxx'], optional=False)]
        dist = Distribution({'name': 'xx', 'ext_modules': modules})
        cmd = self.build_ext(dist)
        cmd.ensure_finalized()
        self.assertRaises((UnknownFileError, CompileError),
                          cmd.run)  # should raise an error

        # same broken source, but optional=True swallows the failure
        modules = [Extension('foo', ['xxx'], optional=True)]
        dist = Distribution({'name': 'xx', 'ext_modules': modules})
        cmd = self.build_ext(dist)
        cmd.ensure_finalized()
        cmd.run()  # should pass
|
| 183 |
+
|
| 184 |
+
    def test_finalize_options(self):
        """finalize_options() adds Python include dirs and coerces the
        string forms of list-valued options."""
        # Make sure Python's include directories (for Python.h, pyconfig.h,
        # etc.) are in the include search path.
        modules = [Extension('foo', ['xxx'], optional=False)]
        dist = Distribution({'name': 'xx', 'ext_modules': modules})
        cmd = self.build_ext(dist)
        cmd.finalize_options()

        py_include = sysconfig.get_python_inc()
        for p in py_include.split(os.path.pathsep):
            self.assertIn(p, cmd.include_dirs)

        plat_py_include = sysconfig.get_python_inc(plat_specific=1)
        for p in plat_py_include.split(os.path.pathsep):
            self.assertIn(p, cmd.include_dirs)

        # make sure cmd.libraries is turned into a list
        # if it's a string
        cmd = self.build_ext(dist)
        cmd.libraries = 'my_lib, other_lib lastlib'
        cmd.finalize_options()
        self.assertEqual(cmd.libraries, ['my_lib', 'other_lib', 'lastlib'])

        # make sure cmd.library_dirs is turned into a list
        # if it's a string
        cmd = self.build_ext(dist)
        cmd.library_dirs = 'my_lib_dir%sother_lib_dir' % os.pathsep
        cmd.finalize_options()
        self.assertIn('my_lib_dir', cmd.library_dirs)
        self.assertIn('other_lib_dir', cmd.library_dirs)

        # make sure rpath is turned into a list
        # if it's a string
        cmd = self.build_ext(dist)
        cmd.rpath = 'one%stwo' % os.pathsep
        cmd.finalize_options()
        self.assertEqual(cmd.rpath, ['one', 'two'])

        # make sure cmd.link_objects is turned into a list
        # if it's a string
        cmd = build_ext(dist)
        cmd.link_objects = 'one two,three'
        cmd.finalize_options()
        self.assertEqual(cmd.link_objects, ['one', 'two', 'three'])

        # XXX more tests to perform for win32

        # make sure define is turned into 2-tuples
        # strings if they are ','-separated strings
        cmd = self.build_ext(dist)
        cmd.define = 'one,two'
        cmd.finalize_options()
        self.assertEqual(cmd.define, [('one', '1'), ('two', '1')])

        # make sure undef is turned into a list of
        # strings if they are ','-separated strings
        cmd = self.build_ext(dist)
        cmd.undef = 'one,two'
        cmd.finalize_options()
        self.assertEqual(cmd.undef, ['one', 'two'])

        # make sure swig_opts is turned into a list
        cmd = self.build_ext(dist)
        cmd.swig_opts = None
        cmd.finalize_options()
        self.assertEqual(cmd.swig_opts, [])

        cmd = self.build_ext(dist)
        cmd.swig_opts = '1 2'
        cmd.finalize_options()
        self.assertEqual(cmd.swig_opts, ['1', '2'])
|
| 255 |
+
|
| 256 |
+
    def test_check_extensions_list(self):
        """check_extensions_list() validates and normalizes the old
        (name, build_info) tuple form into Extension instances."""
        dist = Distribution()
        cmd = self.build_ext(dist)
        cmd.finalize_options()

        #'extensions' option must be a list of Extension instances
        self.assertRaises(DistutilsSetupError,
                          cmd.check_extensions_list, 'foo')

        # each element of 'ext_modules' option must be an
        # Extension instance or 2-tuple
        exts = [('bar', 'foo', 'bar'), 'foo']
        self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)

        # first element of each tuple in 'ext_modules'
        # must be the extension name (a string) and match
        # a python dotted-separated name
        exts = [('foo-bar', '')]
        self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)

        # second element of each tuple in 'ext_modules'
        # must be a dictionary (build info)
        exts = [('foo.bar', '')]
        self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)

        # ok this one should pass
        exts = [('foo.bar', {'sources': [''], 'libraries': 'foo',
                             'some': 'bar'})]
        cmd.check_extensions_list(exts)
        # the tuple is replaced in place by an Extension instance
        ext = exts[0]
        self.assertIsInstance(ext, Extension)

        # check_extensions_list adds in ext the values passed
        # when they are in ('include_dirs', 'library_dirs', 'libraries'
        # 'extra_objects', 'extra_compile_args', 'extra_link_args')
        self.assertEqual(ext.libraries, 'foo')
        self.assertFalse(hasattr(ext, 'some'))

        # 'macros' element of build info dict must be 1- or 2-tuple
        exts = [('foo.bar', {'sources': [''], 'libraries': 'foo',
                'some': 'bar', 'macros': [('1', '2', '3'), 'foo']})]
        self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts)

        # 1-tuples undefine a macro, 2-tuples define one
        exts[0][1]['macros'] = [('1', '2'), ('3',)]
        cmd.check_extensions_list(exts)
        self.assertEqual(exts[0].undef_macros, ['3'])
        self.assertEqual(exts[0].define_macros, [('1', '2')])
|
| 303 |
+
|
| 304 |
+
def test_get_source_files(self):
|
| 305 |
+
modules = [Extension('foo', ['xxx'], optional=False)]
|
| 306 |
+
dist = Distribution({'name': 'xx', 'ext_modules': modules})
|
| 307 |
+
cmd = self.build_ext(dist)
|
| 308 |
+
cmd.ensure_finalized()
|
| 309 |
+
self.assertEqual(cmd.get_source_files(), ['xxx'])
|
| 310 |
+
|
| 311 |
+
    def test_unicode_module_names(self):
        """Non-ASCII extension names keep their filename but export an
        encoded 'PyInitU_' init symbol."""
        modules = [
            Extension('foo', ['aaa'], optional=False),
            Extension('föö', ['uuu'], optional=False),
        ]
        dist = Distribution({'name': 'xx', 'ext_modules': modules})
        cmd = self.build_ext(dist)
        cmd.ensure_finalized()
        # filenames use the literal (possibly non-ASCII) module name
        self.assertRegex(cmd.get_ext_filename(modules[0].name), r'foo(_d)?\..*')
        self.assertRegex(cmd.get_ext_filename(modules[1].name), r'föö(_d)?\..*')
        # export symbols: plain PyInit_ for ASCII, encoded PyInitU_ otherwise
        self.assertEqual(cmd.get_export_symbols(modules[0]), ['PyInit_foo'])
        self.assertEqual(cmd.get_export_symbols(modules[1]), ['PyInitU_f_gkaa'])
|
| 323 |
+
|
| 324 |
+
def test_compiler_option(self):
|
| 325 |
+
# cmd.compiler is an option and
|
| 326 |
+
# should not be overridden by a compiler instance
|
| 327 |
+
# when the command is run
|
| 328 |
+
dist = Distribution()
|
| 329 |
+
cmd = self.build_ext(dist)
|
| 330 |
+
cmd.compiler = 'unix'
|
| 331 |
+
cmd.ensure_finalized()
|
| 332 |
+
cmd.run()
|
| 333 |
+
self.assertEqual(cmd.compiler, 'unix')
|
| 334 |
+
|
| 335 |
+
    def test_get_outputs(self):
        """get_outputs() / get_ext_fullpath() honour the inplace flag
        (issue #5977) for both real builds and path computation."""
        cmd = support.missing_compiler_executable()
        if cmd is not None:
            self.skipTest('The %r command is not found' % cmd)
        tmp_dir = self.mkdtemp()
        c_file = os.path.join(tmp_dir, 'foo.c')
        self.write_file(c_file, 'void PyInit_foo(void) {}\n')
        ext = Extension('foo', [c_file], optional=False)
        dist = Distribution({'name': 'xx',
                             'ext_modules': [ext]})
        cmd = self.build_ext(dist)
        fixup_build_ext(cmd)
        cmd.ensure_finalized()
        self.assertEqual(len(cmd.get_outputs()), 1)

        cmd.build_lib = os.path.join(self.tmp_dir, 'build')
        cmd.build_temp = os.path.join(self.tmp_dir, 'tempt')

        # issue #5977 : distutils build_ext.get_outputs
        # returns wrong result with --inplace
        other_tmp_dir = os.path.realpath(self.mkdtemp())
        old_wd = os.getcwd()
        os.chdir(other_tmp_dir)
        try:
            cmd.inplace = 1
            cmd.run()
            so_file = cmd.get_outputs()[0]
        finally:
            os.chdir(old_wd)
        # inplace: the extension lands next to the (fake) package, i.e.
        # in the directory we ran from
        self.assertTrue(os.path.exists(so_file))
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        self.assertTrue(so_file.endswith(ext_suffix))
        so_dir = os.path.dirname(so_file)
        self.assertEqual(so_dir, other_tmp_dir)

        # not inplace: the extension lands in build_lib
        cmd.inplace = 0
        cmd.compiler = None
        cmd.run()
        so_file = cmd.get_outputs()[0]
        self.assertTrue(os.path.exists(so_file))
        self.assertTrue(so_file.endswith(ext_suffix))
        so_dir = os.path.dirname(so_file)
        self.assertEqual(so_dir, cmd.build_lib)

        # inplace = 0, cmd.package = 'bar'
        build_py = cmd.get_finalized_command('build_py')
        build_py.package_dir = {'': 'bar'}
        path = cmd.get_ext_fullpath('foo')
        # checking that the last directory is the build_dir
        path = os.path.split(path)[0]
        self.assertEqual(path, cmd.build_lib)

        # inplace = 1, cmd.package = 'bar'
        cmd.inplace = 1
        other_tmp_dir = os.path.realpath(self.mkdtemp())
        old_wd = os.getcwd()
        os.chdir(other_tmp_dir)
        try:
            path = cmd.get_ext_fullpath('foo')
        finally:
            os.chdir(old_wd)
        # checking that the last directory is bar
        path = os.path.split(path)[0]
        lastdir = os.path.split(path)[-1]
        self.assertEqual(lastdir, 'bar')
|
| 400 |
+
|
| 401 |
+
    def test_ext_fullpath(self):
        """get_ext_fullpath() resolves dotted extension names for both
        inplace and build_lib targets, with and without a package_dir."""
        ext = sysconfig.get_config_var('EXT_SUFFIX')
        # building lxml.etree inplace
        #etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c')
        #etree_ext = Extension('lxml.etree', [etree_c])
        #dist = Distribution({'name': 'lxml', 'ext_modules': [etree_ext]})
        dist = Distribution()
        cmd = self.build_ext(dist)
        cmd.inplace = 1
        cmd.distribution.package_dir = {'': 'src'}
        cmd.distribution.packages = ['lxml', 'lxml.html']
        curdir = os.getcwd()
        # inplace + package_dir: path rooted at cwd/src
        wanted = os.path.join(curdir, 'src', 'lxml', 'etree' + ext)
        path = cmd.get_ext_fullpath('lxml.etree')
        self.assertEqual(wanted, path)

        # building lxml.etree not inplace
        cmd.inplace = 0
        cmd.build_lib = os.path.join(curdir, 'tmpdir')
        wanted = os.path.join(curdir, 'tmpdir', 'lxml', 'etree' + ext)
        path = cmd.get_ext_fullpath('lxml.etree')
        self.assertEqual(wanted, path)

        # building twisted.runner.portmap not inplace
        build_py = cmd.get_finalized_command('build_py')
        build_py.package_dir = {}
        cmd.distribution.packages = ['twisted', 'twisted.runner.portmap']
        path = cmd.get_ext_fullpath('twisted.runner.portmap')
        wanted = os.path.join(curdir, 'tmpdir', 'twisted', 'runner',
                              'portmap' + ext)
        self.assertEqual(wanted, path)

        # building twisted.runner.portmap inplace
        cmd.inplace = 1
        path = cmd.get_ext_fullpath('twisted.runner.portmap')
        wanted = os.path.join(curdir, 'twisted', 'runner', 'portmap' + ext)
        self.assertEqual(wanted, path)
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
@unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
|
| 441 |
+
def test_deployment_target_default(self):
|
| 442 |
+
# Issue 9516: Test that, in the absence of the environment variable,
|
| 443 |
+
# an extension module is compiled with the same deployment target as
|
| 444 |
+
# the interpreter.
|
| 445 |
+
self._try_compile_deployment_target('==', None)
|
| 446 |
+
|
| 447 |
+
@unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
|
| 448 |
+
def test_deployment_target_too_low(self):
|
| 449 |
+
# Issue 9516: Test that an extension module is not allowed to be
|
| 450 |
+
# compiled with a deployment target less than that of the interpreter.
|
| 451 |
+
self.assertRaises(DistutilsPlatformError,
|
| 452 |
+
self._try_compile_deployment_target, '>', '10.1')
|
| 453 |
+
|
| 454 |
+
@unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX')
|
| 455 |
+
def test_deployment_target_higher_ok(self):
|
| 456 |
+
# Issue 9516: Test that an extension module can be compiled with a
|
| 457 |
+
# deployment target higher than that of the interpreter: the ext
|
| 458 |
+
# module may depend on some newer OS feature.
|
| 459 |
+
deptarget = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
|
| 460 |
+
if deptarget:
|
| 461 |
+
# increment the minor version number (i.e. 10.6 -> 10.7)
|
| 462 |
+
deptarget = [int(x) for x in deptarget.split('.')]
|
| 463 |
+
deptarget[-1] += 1
|
| 464 |
+
deptarget = '.'.join(str(i) for i in deptarget)
|
| 465 |
+
self._try_compile_deployment_target('<', deptarget)
|
| 466 |
+
|
| 467 |
+
    def _try_compile_deployment_target(self, operator, target):
        """Compile a tiny C module asserting ``TARGET <operator> MAC_OS_X_VERSION_MIN_REQUIRED``.

        *operator* is spliced into a C preprocessor ``#if``; *target* (or
        None for "unset") is written into MACOSX_DEPLOYMENT_TARGET before
        building.  Fails the test if the compile errors out.
        """
        # replace os.environ wholesale with a copy so the deployment-target
        # tweak below cannot leak into other tests; addCleanup restores it
        orig_environ = os.environ
        os.environ = orig_environ.copy()
        self.addCleanup(setattr, os, 'environ', orig_environ)

        if target is None:
            if os.environ.get('MACOSX_DEPLOYMENT_TARGET'):
                del os.environ['MACOSX_DEPLOYMENT_TARGET']
        else:
            os.environ['MACOSX_DEPLOYMENT_TARGET'] = target

        deptarget_c = os.path.join(self.tmp_dir, 'deptargetmodule.c')

        # the module body fails to preprocess unless the #if holds
        with open(deptarget_c, 'w') as fp:
            fp.write(textwrap.dedent('''\
                #include <AvailabilityMacros.h>

                int dummy;

                #if TARGET %s MAC_OS_X_VERSION_MIN_REQUIRED
                #else
                #error "Unexpected target"
                #endif

            ''' % operator))

        # get the deployment target that the interpreter was built with
        target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
        target = tuple(map(int, target.split('.')[0:2]))
        # format the target value as defined in the Apple
        # Availability Macros. We can't use the macro names since
        # at least one value we test with will not exist yet.
        if target[:2] < (10, 10):
            # for 10.1 through 10.9.x -> "10n0"
            target = '%02d%01d0' % target
        else:
            # for 10.10 and beyond -> "10nn00"
            if len(target) >= 2:
                target = '%02d%02d00' % target
            else:
                # 11 and later can have no minor version (11 instead of 11.0)
                target = '%02d0000' % target
        # pass the encoded value in as the TARGET macro
        deptarget_ext = Extension(
            'deptarget',
            [deptarget_c],
            extra_compile_args=['-DTARGET=%s'%(target,)],
        )
        dist = Distribution({
            'name': 'deptarget',
            'ext_modules': [deptarget_ext]
        })
        dist.package_dir = self.tmp_dir
        cmd = self.build_ext(dist)
        cmd.build_lib = self.tmp_dir
        cmd.build_temp = self.tmp_dir

        try:
            old_stdout = sys.stdout
            if not support.verbose:
                # silence compiler output
                sys.stdout = StringIO()
            try:
                cmd.ensure_finalized()
                cmd.run()
            finally:
                sys.stdout = old_stdout

        except CompileError:
            self.fail("Wrong deployment target during compilation")
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
class ParallelBuildExtTestCase(BuildExtTestCase):
    """Re-run every build_ext test with parallel compilation switched on."""

    def build_ext(self, *args, **kwargs):
        # same command as the base class, but with the parallel flag set
        cmd = super().build_ext(*args, **kwargs)
        cmd.parallel = True
        return cmd
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
def test_suite():
    """Collect the serial and parallel build_ext test cases."""
    suite = unittest.TestSuite()
    for case in (BuildExtTestCase, ParallelBuildExtTestCase):
        suite.addTest(unittest.makeSuite(case))
    return suite
|
| 551 |
+
|
| 552 |
+
# Allow running this test module directly.
if __name__ == '__main__':
    support.run_unittest(__name__)
|
evalkit_llava/lib/python3.10/distutils/tests/test_build_py.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.command.build_py."""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
import unittest
|
| 6 |
+
|
| 7 |
+
from distutils.command.build_py import build_py
|
| 8 |
+
from distutils.core import Distribution
|
| 9 |
+
from distutils.errors import DistutilsFileError
|
| 10 |
+
|
| 11 |
+
from distutils.tests import support
|
| 12 |
+
from test.support import run_unittest
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class BuildPyTestCase(support.TempdirManager,
                      support.LoggingSilencer,
                      unittest.TestCase):
    """Unit tests for the build_py command."""

    def test_package_data(self):
        """Package data files are copied but never byte-compiled."""
        sources = self.mkdtemp()
        # use context managers so the files are closed even if write() fails
        with open(os.path.join(sources, "__init__.py"), "w") as f:
            f.write("# Pretend this is a package.")
        with open(os.path.join(sources, "README.txt"), "w") as f:
            f.write("Info about this package")

        destination = self.mkdtemp()

        dist = Distribution({"packages": ["pkg"],
                             "package_dir": {"pkg": sources}})
        # script_name need not exist, it just needs to be initialized
        dist.script_name = os.path.join(sources, "setup.py")
        dist.command_obj["build"] = support.DummyCommand(
            force=0,
            build_lib=destination)
        dist.packages = ["pkg"]
        dist.package_data = {"pkg": ["README.txt"]}
        dist.package_dir = {"pkg": sources}

        cmd = build_py(dist)
        cmd.compile = 1
        cmd.ensure_finalized()
        self.assertEqual(cmd.package_data, dist.package_data)

        cmd.run()

        # This makes sure the list of outputs includes byte-compiled
        # files for Python modules but not for package data files
        # (there shouldn't *be* byte-code files for those!).
        self.assertEqual(len(cmd.get_outputs()), 3)
        pkgdest = os.path.join(destination, "pkg")
        files = os.listdir(pkgdest)
        pycache_dir = os.path.join(pkgdest, "__pycache__")
        self.assertIn("__init__.py", files)
        self.assertIn("README.txt", files)
        if sys.dont_write_bytecode:
            self.assertFalse(os.path.exists(pycache_dir))
        else:
            pyc_files = os.listdir(pycache_dir)
            self.assertIn("__init__.%s.pyc" % sys.implementation.cache_tag,
                          pyc_files)

    def test_empty_package_dir(self):
        """A package_dir of '' must not break package_data collection."""
        # See bugs #1668596/#1720897
        sources = self.mkdtemp()
        open(os.path.join(sources, "__init__.py"), "w").close()

        testdir = os.path.join(sources, "doc")
        os.mkdir(testdir)
        open(os.path.join(testdir, "testfile"), "w").close()

        # TempdirManager's setUp/tearDown is assumed to restore the cwd
        os.chdir(sources)
        dist = Distribution({"packages": ["pkg"],
                             "package_dir": {"pkg": ""},
                             "package_data": {"pkg": ["doc/*"]}})
        # script_name need not exist, it just needs to be initialized
        dist.script_name = os.path.join(sources, "setup.py")
        dist.script_args = ["build"]
        dist.parse_command_line()

        try:
            dist.run_commands()
        except DistutilsFileError:
            self.fail("failed package_data test when package_dir is ''")

    @unittest.skipIf(sys.dont_write_bytecode, 'byte-compile disabled')
    def test_byte_compile(self):
        """compile=1 produces a .pyc next to the module in __pycache__."""
        project_dir, dist = self.create_dist(py_modules=['boiledeggs'])
        os.chdir(project_dir)
        self.write_file('boiledeggs.py', 'import antigravity')
        cmd = build_py(dist)
        cmd.compile = 1
        cmd.build_lib = 'here'
        cmd.finalize_options()
        cmd.run()

        found = os.listdir(cmd.build_lib)
        self.assertEqual(sorted(found), ['__pycache__', 'boiledeggs.py'])
        found = os.listdir(os.path.join(cmd.build_lib, '__pycache__'))
        self.assertEqual(found,
                         ['boiledeggs.%s.pyc' % sys.implementation.cache_tag])

    @unittest.skipIf(sys.dont_write_bytecode, 'byte-compile disabled')
    def test_byte_compile_optimized(self):
        """optimize=1 (and compile=0) produces only an .opt-1.pyc file."""
        project_dir, dist = self.create_dist(py_modules=['boiledeggs'])
        os.chdir(project_dir)
        self.write_file('boiledeggs.py', 'import antigravity')
        cmd = build_py(dist)
        cmd.compile = 0
        cmd.optimize = 1
        cmd.build_lib = 'here'
        cmd.finalize_options()
        cmd.run()

        found = os.listdir(cmd.build_lib)
        self.assertEqual(sorted(found), ['__pycache__', 'boiledeggs.py'])
        found = os.listdir(os.path.join(cmd.build_lib, '__pycache__'))
        expect = 'boiledeggs.{}.opt-1.pyc'.format(sys.implementation.cache_tag)
        self.assertEqual(sorted(found), [expect])

    def test_dir_in_package_data(self):
        """
        A directory in package_data should not be added to the filelist.
        """
        # See bug 19286
        sources = self.mkdtemp()
        pkg_dir = os.path.join(sources, "pkg")

        os.mkdir(pkg_dir)
        open(os.path.join(pkg_dir, "__init__.py"), "w").close()

        docdir = os.path.join(pkg_dir, "doc")
        os.mkdir(docdir)
        open(os.path.join(docdir, "testfile"), "w").close()

        # create the directory that could be incorrectly detected as a file
        os.mkdir(os.path.join(docdir, 'otherdir'))

        os.chdir(sources)
        dist = Distribution({"packages": ["pkg"],
                             "package_data": {"pkg": ["doc/*"]}})
        # script_name need not exist, it just needs to be initialized
        dist.script_name = os.path.join(sources, "setup.py")
        dist.script_args = ["build"]
        dist.parse_command_line()

        try:
            dist.run_commands()
        except DistutilsFileError:
            self.fail("failed package_data when data dir includes a dir")

    def test_dont_write_bytecode(self):
        """byte_compile must be a no-op when sys.dont_write_bytecode is set."""
        # makes sure byte_compile is not used
        dist = self.create_dist()[1]
        cmd = build_py(dist)
        cmd.compile = 1
        cmd.optimize = 1

        old_dont_write_bytecode = sys.dont_write_bytecode
        sys.dont_write_bytecode = True
        try:
            cmd.byte_compile([])
        finally:
            sys.dont_write_bytecode = old_dont_write_bytecode

        # LoggingSilencer captured the warning emitted by byte_compile
        self.assertIn('byte-compiling is disabled',
                      self.logs[0][1] % self.logs[0][2])
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def test_suite():
    """Return the build_py test suite."""
    suite = unittest.makeSuite(BuildPyTestCase)
    return suite
|
| 177 |
+
|
| 178 |
+
# Allow running this test module directly.
if __name__ == "__main__":
    run_unittest(test_suite())
|
evalkit_llava/lib/python3.10/distutils/tests/test_build_scripts.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.command.build_scripts."""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import unittest
|
| 5 |
+
|
| 6 |
+
from distutils.command.build_scripts import build_scripts
|
| 7 |
+
from distutils.core import Distribution
|
| 8 |
+
from distutils import sysconfig
|
| 9 |
+
|
| 10 |
+
from distutils.tests import support
|
| 11 |
+
from test.support import run_unittest
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class BuildScriptsTestCase(support.TempdirManager,
                           support.LoggingSilencer,
                           unittest.TestCase):
    """Unit tests for the build_scripts command."""

    def test_default_settings(self):
        """finalize_options inherits force/build_dir from the build command."""
        cmd = self.get_build_scripts_cmd("/foo/bar", [])
        self.assertFalse(cmd.force)
        self.assertIsNone(cmd.build_dir)

        cmd.finalize_options()

        self.assertTrue(cmd.force)
        self.assertEqual(cmd.build_dir, "/foo/bar")

    def test_build(self):
        """run() copies every sample script into the target directory."""
        source = self.mkdtemp()
        target = self.mkdtemp()
        expected = self.write_sample_scripts(source)

        cmd = self.get_build_scripts_cmd(target,
                                         [os.path.join(source, fn)
                                          for fn in expected])
        cmd.finalize_options()
        cmd.run()

        built = os.listdir(target)
        for name in expected:
            self.assertIn(name, built)

    def get_build_scripts_cmd(self, target, scripts):
        """Build a build_scripts command wired to a dummy 'build' command."""
        import sys
        dist = Distribution()
        dist.scripts = scripts
        dist.command_obj["build"] = support.DummyCommand(
            build_scripts=target,
            force=1,
            executable=sys.executable
            )
        return build_scripts(dist)

    def write_sample_scripts(self, dir):
        """Create two Python scripts and one shell script; return their names."""
        expected = []
        expected.append("script1.py")
        self.write_script(dir, "script1.py",
                          ("#! /usr/bin/env python2.3\n"
                           "# bogus script w/ Python sh-bang\n"
                           "pass\n"))
        expected.append("script2.py")
        self.write_script(dir, "script2.py",
                          ("#!/usr/bin/python\n"
                           "# bogus script w/ Python sh-bang\n"
                           "pass\n"))
        expected.append("shell.sh")
        self.write_script(dir, "shell.sh",
                          ("#!/bin/sh\n"
                           "# bogus shell script w/ sh-bang\n"
                           "exit 0\n"))
        return expected

    def write_script(self, dir, name, text):
        """Write *text* to dir/name, closing the file even on error."""
        with open(os.path.join(dir, name), "w") as f:
            f.write(text)

    def test_version_int(self):
        """run() must cope with a non-string sysconfig VERSION value."""
        source = self.mkdtemp()
        target = self.mkdtemp()
        expected = self.write_sample_scripts(source)


        cmd = self.get_build_scripts_cmd(target,
                                         [os.path.join(source, fn)
                                          for fn in expected])
        cmd.finalize_options()

        # http://bugs.python.org/issue4524
        #
        # On linux-g++-32 with command line `./configure --enable-ipv6
        # --with-suffix=3`, python is compiled okay but the build scripts
        # failed when writing the name of the executable
        old = sysconfig.get_config_vars().get('VERSION')
        sysconfig._config_vars['VERSION'] = 4
        try:
            cmd.run()
        finally:
            if old is not None:
                sysconfig._config_vars['VERSION'] = old
            else:
                # there was no VERSION before: remove the bogus int so it
                # cannot leak into later tests through the config-var cache
                sysconfig._config_vars.pop('VERSION', None)

        built = os.listdir(target)
        for name in expected:
            self.assertIn(name, built)
|
| 107 |
+
|
| 108 |
+
def test_suite():
    """Return the build_scripts test suite."""
    suite = unittest.makeSuite(BuildScriptsTestCase)
    return suite
|
| 110 |
+
|
| 111 |
+
# Allow running this test module directly.
if __name__ == "__main__":
    run_unittest(test_suite())
|
evalkit_llava/lib/python3.10/distutils/tests/test_check.py
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.command.check."""
|
| 2 |
+
import os
|
| 3 |
+
import textwrap
|
| 4 |
+
import unittest
|
| 5 |
+
from test.support import run_unittest
|
| 6 |
+
|
| 7 |
+
from distutils.command.check import check, HAS_DOCUTILS
|
| 8 |
+
from distutils.tests import support
|
| 9 |
+
from distutils.errors import DistutilsSetupError
|
| 10 |
+
|
| 11 |
+
try:
|
| 12 |
+
import pygments
|
| 13 |
+
except ImportError:
|
| 14 |
+
pygments = None
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
HERE = os.path.dirname(__file__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class CheckTestCase(support.LoggingSilencer,
                    support.TempdirManager,
                    unittest.TestCase):
    """Unit tests for the check command."""

    def _run(self, metadata=None, cwd=None, **options):
        """Run a check command built from *metadata* (optionally from *cwd*).

        Extra keyword arguments become command options.  Returns the
        finished command so callers can inspect ``cmd._warnings``.
        """
        if metadata is None:
            metadata = {}
        if cwd is not None:
            old_dir = os.getcwd()
            os.chdir(cwd)
        try:
            pkg_info, dist = self.create_dist(**metadata)
            cmd = check(dist)
            cmd.initialize_options()
            for name, value in options.items():
                setattr(cmd, name, value)
            cmd.ensure_finalized()
            cmd.run()
        finally:
            # restore the working directory even when cmd.run() raises
            # (e.g. DistutilsSetupError under strict mode) so a failing
            # test cannot leave the whole test run in the wrong cwd
            if cwd is not None:
                os.chdir(old_dir)
        return cmd

    def test_check_metadata(self):
        """check warns on missing metadata and errors under strict mode."""
        # let's run the command with no metadata at all
        # by default, check is checking the metadata
        # should have some warnings
        cmd = self._run()
        self.assertEqual(cmd._warnings, 2)

        # now let's add the required fields
        # and run it again, to make sure we don't get
        # any warning anymore
        metadata = {'url': 'xxx', 'author': 'xxx',
                    'author_email': 'xxx',
                    'name': 'xxx', 'version': 'xxx'}
        cmd = self._run(metadata)
        self.assertEqual(cmd._warnings, 0)

        # now with the strict mode, we should
        # get an error if there are missing metadata
        self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1})

        # and of course, no error when all metadata are present
        cmd = self._run(metadata, strict=1)
        self.assertEqual(cmd._warnings, 0)

        # now a test with non-ASCII characters
        metadata = {'url': 'xxx', 'author': '\u00c9ric',
                    'author_email': 'xxx', 'name': 'xxx',
                    'version': 'xxx',
                    'description': 'Something about esszet \u00df',
                    'long_description': 'More things about esszet \u00df'}
        cmd = self._run(metadata)
        self.assertEqual(cmd._warnings, 0)

    @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
    def test_check_document(self):
        """_check_rst_data reports broken reST and accepts valid reST."""
        pkg_info, dist = self.create_dist()
        cmd = check(dist)

        # let's see if it detects broken rest
        broken_rest = 'title\n===\n\ntest'
        msgs = cmd._check_rst_data(broken_rest)
        self.assertEqual(len(msgs), 1)

        # and non-broken rest
        rest = 'title\n=====\n\ntest'
        msgs = cmd._check_rst_data(rest)
        self.assertEqual(len(msgs), 0)

    @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
    def test_check_restructuredtext(self):
        """check_restructuredtext warns, and strict mode raises, on bad reST."""
        # let's see if it detects broken rest in long_description
        broken_rest = 'title\n===\n\ntest'
        pkg_info, dist = self.create_dist(long_description=broken_rest)
        cmd = check(dist)
        cmd.check_restructuredtext()
        self.assertEqual(cmd._warnings, 1)

        # let's see if we have an error with strict=1
        metadata = {'url': 'xxx', 'author': 'xxx',
                    'author_email': 'xxx',
                    'name': 'xxx', 'version': 'xxx',
                    'long_description': broken_rest}
        self.assertRaises(DistutilsSetupError, self._run, metadata,
                          **{'strict': 1, 'restructuredtext': 1})

        # and non-broken rest, including a non-ASCII character to test #12114
        metadata['long_description'] = 'title\n=====\n\ntest \u00df'
        cmd = self._run(metadata, strict=1, restructuredtext=1)
        self.assertEqual(cmd._warnings, 0)

        # check that includes work to test #31292
        metadata['long_description'] = 'title\n=====\n\n.. include:: includetest.rst'
        cmd = self._run(metadata, cwd=HERE, strict=1, restructuredtext=1)
        self.assertEqual(cmd._warnings, 0)

    @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
    def test_check_restructuredtext_with_syntax_highlight(self):
        """`code`/`code-block` directives only warn when pygments is absent."""
        # Don't fail if there is a `code` or `code-block` directive

        example_rst_docs = []
        example_rst_docs.append(textwrap.dedent("""\
            Here's some code:

            .. code:: python

                def foo():
                    pass
            """))
        example_rst_docs.append(textwrap.dedent("""\
            Here's some code:

            .. code-block:: python

                def foo():
                    pass
            """))

        for rest_with_code in example_rst_docs:
            pkg_info, dist = self.create_dist(long_description=rest_with_code)
            cmd = check(dist)
            cmd.check_restructuredtext()
            msgs = cmd._check_rst_data(rest_with_code)
            if pygments is not None:
                self.assertEqual(len(msgs), 0)
            else:
                self.assertEqual(len(msgs), 1)
                self.assertEqual(
                    str(msgs[0][1]),
                    'Cannot analyze code. Pygments package not found.'
                )

    def test_check_all(self):
        """strict + restructuredtext with empty metadata must raise."""
        self.assertRaises(DistutilsSetupError, self._run,
                          {}, **{'strict': 1,
                                 'restructuredtext': 1})
|
| 158 |
+
|
| 159 |
+
def test_suite():
    """Return the check-command test suite."""
    suite = unittest.makeSuite(CheckTestCase)
    return suite
|
| 161 |
+
|
| 162 |
+
# Allow running this test module directly.
if __name__ == "__main__":
    run_unittest(test_suite())
|
evalkit_llava/lib/python3.10/distutils/tests/test_cmd.py
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.cmd."""
|
| 2 |
+
import unittest
|
| 3 |
+
import os
|
| 4 |
+
from test.support import captured_stdout, run_unittest
|
| 5 |
+
|
| 6 |
+
from distutils.cmd import Command
|
| 7 |
+
from distutils.dist import Distribution
|
| 8 |
+
from distutils.errors import DistutilsOptionError
|
| 9 |
+
from distutils import debug
|
| 10 |
+
|
| 11 |
+
class MyCmd(Command):
    # Minimal concrete Command subclass: only the abstract hook is
    # implemented, which is enough to exercise the Command helpers below.
    def initialize_options(self):
        pass
|
| 14 |
+
|
| 15 |
+
class CommandTestCase(unittest.TestCase):
    """Unit tests for the helper methods of distutils.cmd.Command."""

    def setUp(self):
        # a fresh minimal command for every test
        dist = Distribution()
        self.cmd = MyCmd(dist)

    def test_ensure_string_list(self):
        # valid values pass through; non-string elements raise

        cmd = self.cmd
        cmd.not_string_list = ['one', 2, 'three']
        cmd.yes_string_list = ['one', 'two', 'three']
        cmd.not_string_list2 = object()
        cmd.yes_string_list2 = 'ok'
        cmd.ensure_string_list('yes_string_list')
        cmd.ensure_string_list('yes_string_list2')

        self.assertRaises(DistutilsOptionError,
                          cmd.ensure_string_list, 'not_string_list')

        self.assertRaises(DistutilsOptionError,
                          cmd.ensure_string_list, 'not_string_list2')

        # a comma-separated string is split into a list
        cmd.option1 = 'ok,dok'
        cmd.ensure_string_list('option1')
        self.assertEqual(cmd.option1, ['ok', 'dok'])

        cmd.option2 = ['xxx', 'www']
        cmd.ensure_string_list('option2')

        cmd.option3 = ['ok', 2]
        self.assertRaises(DistutilsOptionError, cmd.ensure_string_list,
                          'option3')


    def test_make_file(self):

        cmd = self.cmd

        # making sure it raises when infiles is not a string or a list/tuple
        self.assertRaises(TypeError, cmd.make_file,
                          infiles=1, outfile='', func='func', args=())

        # making sure execute gets called properly (monkeypatched here so
        # the assertion runs inside make_file)
        def _execute(func, args, exec_msg, level):
            self.assertEqual(exec_msg, 'generating out from in')
        cmd.force = True
        cmd.execute = _execute
        cmd.make_file(infiles='in', outfile='out', func='func', args=())

    def test_dump_options(self):
        # capture announce() output instead of letting it hit the log

        msgs = []
        def _announce(msg, level):
            msgs.append(msg)
        cmd = self.cmd
        cmd.announce = _announce
        cmd.option1 = 1
        cmd.option2 = 1
        cmd.user_options = [('option1', '', ''), ('option2', '', '')]
        cmd.dump_options()

        wanted = ["command options for 'MyCmd':", '  option1 = 1',
                  '  option2 = 1']
        self.assertEqual(msgs, wanted)

    def test_ensure_string(self):
        cmd = self.cmd
        cmd.option1 = 'ok'
        cmd.ensure_string('option1')

        # None is replaced by the supplied default
        cmd.option2 = None
        cmd.ensure_string('option2', 'xxx')
        self.assertTrue(hasattr(cmd, 'option2'))

        # non-string, non-None values raise
        cmd.option3 = 1
        self.assertRaises(DistutilsOptionError, cmd.ensure_string, 'option3')

    def test_ensure_filename(self):
        cmd = self.cmd
        # this test file itself is a convenient existing filename
        cmd.option1 = __file__
        cmd.ensure_filename('option1')
        cmd.option2 = 'xxx'
        self.assertRaises(DistutilsOptionError, cmd.ensure_filename, 'option2')

    def test_ensure_dirname(self):
        cmd = self.cmd
        cmd.option1 = os.path.dirname(__file__) or os.curdir
        cmd.ensure_dirname('option1')
        cmd.option2 = 'xxx'
        self.assertRaises(DistutilsOptionError, cmd.ensure_dirname, 'option2')

    def test_debug_print(self):
        # silent unless the module-global debug flag is set
        cmd = self.cmd
        with captured_stdout() as stdout:
            cmd.debug_print('xxx')
        stdout.seek(0)
        self.assertEqual(stdout.read(), '')

        debug.DEBUG = True
        try:
            with captured_stdout() as stdout:
                cmd.debug_print('xxx')
            stdout.seek(0)
            self.assertEqual(stdout.read(), 'xxx\n')
        finally:
            # always reset the global so later tests stay quiet
            debug.DEBUG = False
|
| 121 |
+
|
| 122 |
+
def test_suite():
    """Return the Command test suite."""
    suite = unittest.makeSuite(CommandTestCase)
    return suite
|
| 124 |
+
|
| 125 |
+
# Allow running this test module directly.
if __name__ == '__main__':
    run_unittest(test_suite())
|
evalkit_llava/lib/python3.10/distutils/tests/test_file_util.py
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for distutils.file_util."""
|
| 2 |
+
import unittest
|
| 3 |
+
import os
|
| 4 |
+
import errno
|
| 5 |
+
from unittest.mock import patch
|
| 6 |
+
|
| 7 |
+
from distutils.file_util import move_file, copy_file
|
| 8 |
+
from distutils import log
|
| 9 |
+
from distutils.tests import support
|
| 10 |
+
from distutils.errors import DistutilsFileError
|
| 11 |
+
from test.support import run_unittest
|
| 12 |
+
from test.support.os_helper import unlink
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class FileUtilTestCase(support.TempdirManager, unittest.TestCase):
    """Tests for distutils.file_util.move_file() and copy_file()."""

    def _log(self, msg, *args):
        # Replacement for log.info() that records the formatted message so
        # tests can assert on what would have been logged.
        if args:
            self._logs.append(msg % args)
        else:
            self._logs.append(msg)

    def setUp(self):
        super().setUp()
        # Intercept the distutils logger; the original is restored in
        # tearDown() so other tests are unaffected.
        self._logs = []
        self.old_log = log.info
        log.info = self._log
        tmp_dir = self.mkdtemp()
        self.source = os.path.join(tmp_dir, 'f1')
        self.target = os.path.join(tmp_dir, 'f2')
        self.target_dir = os.path.join(tmp_dir, 'd1')

    def tearDown(self):
        # Restore the real logger before tempdir cleanup runs.
        log.info = self.old_log
        super().tearDown()

    def test_move_file_verbosity(self):
        with open(self.source, 'w') as f:
            f.write('some content')

        # verbose=0: nothing should be logged.
        move_file(self.source, self.target, verbose=0)
        wanted = []
        self.assertEqual(self._logs, wanted)

        # back to original state
        move_file(self.target, self.source, verbose=0)

        # verbose=1: exactly one "moving src -> dst" line is logged.
        move_file(self.source, self.target, verbose=1)
        wanted = ['moving %s -> %s' % (self.source, self.target)]
        self.assertEqual(self._logs, wanted)

        # back to original state
        move_file(self.target, self.source, verbose=0)

        self._logs = []
        # now the target is a dir
        os.mkdir(self.target_dir)
        move_file(self.source, self.target_dir, verbose=1)
        wanted = ['moving %s -> %s' % (self.source, self.target_dir)]
        self.assertEqual(self._logs, wanted)

    def test_move_file_exception_unpacking_rename(self):
        # see issue 22182: an OSError built with non-standard args used to
        # break the error unpacking inside move_file().
        with patch("os.rename", side_effect=OSError("wrong", 1)), \
             self.assertRaises(DistutilsFileError):
            with open(self.source, 'w') as fobj:
                fobj.write('spam eggs')
            move_file(self.source, self.target, verbose=0)

    def test_move_file_exception_unpacking_unlink(self):
        # see issue 22182: same unpacking problem on the cross-device
        # (EXDEV) fallback path, where os.unlink() fails after the copy.
        with patch("os.rename", side_effect=OSError(errno.EXDEV, "wrong")), \
             patch("os.unlink", side_effect=OSError("wrong", 1)), \
             self.assertRaises(DistutilsFileError):
            with open(self.source, 'w') as fobj:
                fobj.write('spam eggs')
            move_file(self.source, self.target, verbose=0)

    def test_copy_file_hard_link(self):
        with open(self.source, 'w') as f:
            f.write('some content')
        # Check first that copy_file() will not fall back on copying the file
        # instead of creating the hard link.
        try:
            os.link(self.source, self.target)
        except OSError as e:
            self.skipTest('os.link: %s' % e)
        else:
            unlink(self.target)
        st = os.stat(self.source)
        copy_file(self.source, self.target, link='hard')
        st2 = os.stat(self.source)
        st3 = os.stat(self.target)
        # A hard link shares one inode, so all three stats must match.
        self.assertTrue(os.path.samestat(st, st2), (st, st2))
        self.assertTrue(os.path.samestat(st2, st3), (st2, st3))
        with open(self.source, 'r') as f:
            self.assertEqual(f.read(), 'some content')

    def test_copy_file_hard_link_failure(self):
        # If hard linking fails, copy_file() falls back on copying file
        # (some special filesystems don't support hard linking even under
        # Unix, see issue #8876).
        with open(self.source, 'w') as f:
            f.write('some content')
        st = os.stat(self.source)
        with patch("os.link", side_effect=OSError(0, "linking unsupported")):
            copy_file(self.source, self.target, link='hard')
        st2 = os.stat(self.source)
        st3 = os.stat(self.target)
        self.assertTrue(os.path.samestat(st, st2), (st, st2))
        # The target must be an independent copy, not a link to the source.
        self.assertFalse(os.path.samestat(st2, st3), (st2, st3))
        for fn in (self.source, self.target):
            with open(fn, 'r') as f:
                self.assertEqual(f.read(), 'some content')
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def test_suite():
    """Build and return the suite of FileUtilTestCase tests."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(FileUtilTestCase)
|
| 122 |
+
|
| 123 |
+
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    run_unittest(test_suite())
|