Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +2 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/_version.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/archive.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/asyn.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/caching.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/callbacks.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/compression.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/config.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/conftest.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/core.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/dircache.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/exceptions.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/fuse.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/generic.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/gui.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/mapping.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/parquet.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/registry.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/spec.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/transaction.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/utils.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/caching.py +875 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/exceptions.py +17 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/common.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/copy.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/get.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/put.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/fsspec/tests/abstract/copy.py +557 -0
- evalkit_internvl/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc +3 -0
- evalkit_internvl/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc +3 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/__init__.py +15 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/_elffile.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/_manylinux.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/_musllinux.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/_parser.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/_structures.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/_tokenizer.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/markers.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/requirements.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/specifiers.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/tags.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/utils.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/version.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/_elffile.py +110 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/_manylinux.py +263 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/_musllinux.py +85 -0
- evalkit_internvl/lib/python3.10/site-packages/packaging/_parser.py +354 -0
.gitattributes
CHANGED
|
@@ -583,3 +583,5 @@ evalkit_internvl/bin/bunzip2 filter=lfs diff=lfs merge=lfs -text
|
|
| 583 |
evalkit_internvl/bin/sqlite3 filter=lfs diff=lfs merge=lfs -text
|
| 584 |
evalkit_internvl/lib/python3.10/site-packages/numpy.libs/libquadmath-96973f99.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
|
| 585 |
evalkit_internvl/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 583 |
evalkit_internvl/bin/sqlite3 filter=lfs diff=lfs merge=lfs -text
|
| 584 |
evalkit_internvl/lib/python3.10/site-packages/numpy.libs/libquadmath-96973f99.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
|
| 585 |
evalkit_internvl/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
|
| 586 |
+
evalkit_internvl/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 587 |
+
evalkit_internvl/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.45 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/_version.cpython-310.pyc
ADDED
|
Binary file (495 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/archive.cpython-310.pyc
ADDED
|
Binary file (2.98 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/asyn.cpython-310.pyc
ADDED
|
Binary file (29.3 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/caching.cpython-310.pyc
ADDED
|
Binary file (22 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/callbacks.cpython-310.pyc
ADDED
|
Binary file (10.9 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/compression.cpython-310.pyc
ADDED
|
Binary file (5.09 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/config.cpython-310.pyc
ADDED
|
Binary file (3.82 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/conftest.cpython-310.pyc
ADDED
|
Binary file (1.56 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/core.cpython-310.pyc
ADDED
|
Binary file (21.2 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/dircache.cpython-310.pyc
ADDED
|
Binary file (3.41 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/exceptions.cpython-310.pyc
ADDED
|
Binary file (732 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/fuse.cpython-310.pyc
ADDED
|
Binary file (10.1 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/generic.cpython-310.pyc
ADDED
|
Binary file (12.5 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/gui.cpython-310.pyc
ADDED
|
Binary file (14.6 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/mapping.cpython-310.pyc
ADDED
|
Binary file (8.85 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/parquet.cpython-310.pyc
ADDED
|
Binary file (11.2 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/registry.cpython-310.pyc
ADDED
|
Binary file (8.6 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/spec.cpython-310.pyc
ADDED
|
Binary file (58.4 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/transaction.cpython-310.pyc
ADDED
|
Binary file (3.14 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (19.9 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/caching.py
ADDED
|
@@ -0,0 +1,875 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import collections
|
| 4 |
+
import functools
|
| 5 |
+
import logging
|
| 6 |
+
import math
|
| 7 |
+
import os
|
| 8 |
+
import threading
|
| 9 |
+
import warnings
|
| 10 |
+
from concurrent.futures import Future, ThreadPoolExecutor
|
| 11 |
+
from typing import (
|
| 12 |
+
TYPE_CHECKING,
|
| 13 |
+
Any,
|
| 14 |
+
Callable,
|
| 15 |
+
ClassVar,
|
| 16 |
+
Generic,
|
| 17 |
+
NamedTuple,
|
| 18 |
+
OrderedDict,
|
| 19 |
+
TypeVar,
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
if TYPE_CHECKING:
|
| 23 |
+
import mmap
|
| 24 |
+
|
| 25 |
+
from typing_extensions import ParamSpec
|
| 26 |
+
|
| 27 |
+
P = ParamSpec("P")
|
| 28 |
+
else:
|
| 29 |
+
P = TypeVar("P")
|
| 30 |
+
|
| 31 |
+
T = TypeVar("T")
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
logger = logging.getLogger("fsspec")
|
| 35 |
+
|
| 36 |
+
Fetcher = Callable[[int, int], bytes] # Maps (start, end) to bytes
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class BaseCache:
|
| 40 |
+
"""Pass-though cache: doesn't keep anything, calls every time
|
| 41 |
+
|
| 42 |
+
Acts as base class for other cachers
|
| 43 |
+
|
| 44 |
+
Parameters
|
| 45 |
+
----------
|
| 46 |
+
blocksize: int
|
| 47 |
+
How far to read ahead in numbers of bytes
|
| 48 |
+
fetcher: func
|
| 49 |
+
Function of the form f(start, end) which gets bytes from remote as
|
| 50 |
+
specified
|
| 51 |
+
size: int
|
| 52 |
+
How big this file is
|
| 53 |
+
"""
|
| 54 |
+
|
| 55 |
+
name: ClassVar[str] = "none"
|
| 56 |
+
|
| 57 |
+
def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
|
| 58 |
+
self.blocksize = blocksize
|
| 59 |
+
self.fetcher = fetcher
|
| 60 |
+
self.size = size
|
| 61 |
+
|
| 62 |
+
def _fetch(self, start: int | None, stop: int | None) -> bytes:
|
| 63 |
+
if start is None:
|
| 64 |
+
start = 0
|
| 65 |
+
if stop is None:
|
| 66 |
+
stop = self.size
|
| 67 |
+
if start >= self.size or start >= stop:
|
| 68 |
+
return b""
|
| 69 |
+
return self.fetcher(start, stop)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class MMapCache(BaseCache):
|
| 73 |
+
"""memory-mapped sparse file cache
|
| 74 |
+
|
| 75 |
+
Opens temporary file, which is filled blocks-wise when data is requested.
|
| 76 |
+
Ensure there is enough disc space in the temporary location.
|
| 77 |
+
|
| 78 |
+
This cache method might only work on posix
|
| 79 |
+
"""
|
| 80 |
+
|
| 81 |
+
name = "mmap"
|
| 82 |
+
|
| 83 |
+
def __init__(
|
| 84 |
+
self,
|
| 85 |
+
blocksize: int,
|
| 86 |
+
fetcher: Fetcher,
|
| 87 |
+
size: int,
|
| 88 |
+
location: str | None = None,
|
| 89 |
+
blocks: set[int] | None = None,
|
| 90 |
+
) -> None:
|
| 91 |
+
super().__init__(blocksize, fetcher, size)
|
| 92 |
+
self.blocks = set() if blocks is None else blocks
|
| 93 |
+
self.location = location
|
| 94 |
+
self.cache = self._makefile()
|
| 95 |
+
|
| 96 |
+
def _makefile(self) -> mmap.mmap | bytearray:
|
| 97 |
+
import mmap
|
| 98 |
+
import tempfile
|
| 99 |
+
|
| 100 |
+
if self.size == 0:
|
| 101 |
+
return bytearray()
|
| 102 |
+
|
| 103 |
+
# posix version
|
| 104 |
+
if self.location is None or not os.path.exists(self.location):
|
| 105 |
+
if self.location is None:
|
| 106 |
+
fd = tempfile.TemporaryFile()
|
| 107 |
+
self.blocks = set()
|
| 108 |
+
else:
|
| 109 |
+
fd = open(self.location, "wb+")
|
| 110 |
+
fd.seek(self.size - 1)
|
| 111 |
+
fd.write(b"1")
|
| 112 |
+
fd.flush()
|
| 113 |
+
else:
|
| 114 |
+
fd = open(self.location, "r+b")
|
| 115 |
+
|
| 116 |
+
return mmap.mmap(fd.fileno(), self.size)
|
| 117 |
+
|
| 118 |
+
def _fetch(self, start: int | None, end: int | None) -> bytes:
|
| 119 |
+
logger.debug(f"MMap cache fetching {start}-{end}")
|
| 120 |
+
if start is None:
|
| 121 |
+
start = 0
|
| 122 |
+
if end is None:
|
| 123 |
+
end = self.size
|
| 124 |
+
if start >= self.size or start >= end:
|
| 125 |
+
return b""
|
| 126 |
+
start_block = start // self.blocksize
|
| 127 |
+
end_block = end // self.blocksize
|
| 128 |
+
need = [i for i in range(start_block, end_block + 1) if i not in self.blocks]
|
| 129 |
+
while need:
|
| 130 |
+
# TODO: not a for loop so we can consolidate blocks later to
|
| 131 |
+
# make fewer fetch calls; this could be parallel
|
| 132 |
+
i = need.pop(0)
|
| 133 |
+
sstart = i * self.blocksize
|
| 134 |
+
send = min(sstart + self.blocksize, self.size)
|
| 135 |
+
logger.debug(f"MMap get block #{i} ({sstart}-{send}")
|
| 136 |
+
self.cache[sstart:send] = self.fetcher(sstart, send)
|
| 137 |
+
self.blocks.add(i)
|
| 138 |
+
|
| 139 |
+
return self.cache[start:end]
|
| 140 |
+
|
| 141 |
+
def __getstate__(self) -> dict[str, Any]:
|
| 142 |
+
state = self.__dict__.copy()
|
| 143 |
+
# Remove the unpicklable entries.
|
| 144 |
+
del state["cache"]
|
| 145 |
+
return state
|
| 146 |
+
|
| 147 |
+
def __setstate__(self, state: dict[str, Any]) -> None:
|
| 148 |
+
# Restore instance attributes
|
| 149 |
+
self.__dict__.update(state)
|
| 150 |
+
self.cache = self._makefile()
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
class ReadAheadCache(BaseCache):
|
| 154 |
+
"""Cache which reads only when we get beyond a block of data
|
| 155 |
+
|
| 156 |
+
This is a much simpler version of BytesCache, and does not attempt to
|
| 157 |
+
fill holes in the cache or keep fragments alive. It is best suited to
|
| 158 |
+
many small reads in a sequential order (e.g., reading lines from a file).
|
| 159 |
+
"""
|
| 160 |
+
|
| 161 |
+
name = "readahead"
|
| 162 |
+
|
| 163 |
+
def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
|
| 164 |
+
super().__init__(blocksize, fetcher, size)
|
| 165 |
+
self.cache = b""
|
| 166 |
+
self.start = 0
|
| 167 |
+
self.end = 0
|
| 168 |
+
|
| 169 |
+
def _fetch(self, start: int | None, end: int | None) -> bytes:
|
| 170 |
+
if start is None:
|
| 171 |
+
start = 0
|
| 172 |
+
if end is None or end > self.size:
|
| 173 |
+
end = self.size
|
| 174 |
+
if start >= self.size or start >= end:
|
| 175 |
+
return b""
|
| 176 |
+
l = end - start
|
| 177 |
+
if start >= self.start and end <= self.end:
|
| 178 |
+
# cache hit
|
| 179 |
+
return self.cache[start - self.start : end - self.start]
|
| 180 |
+
elif self.start <= start < self.end:
|
| 181 |
+
# partial hit
|
| 182 |
+
part = self.cache[start - self.start :]
|
| 183 |
+
l -= len(part)
|
| 184 |
+
start = self.end
|
| 185 |
+
else:
|
| 186 |
+
# miss
|
| 187 |
+
part = b""
|
| 188 |
+
end = min(self.size, end + self.blocksize)
|
| 189 |
+
self.cache = self.fetcher(start, end) # new block replaces old
|
| 190 |
+
self.start = start
|
| 191 |
+
self.end = self.start + len(self.cache)
|
| 192 |
+
return part + self.cache[:l]
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
class FirstChunkCache(BaseCache):
|
| 196 |
+
"""Caches the first block of a file only
|
| 197 |
+
|
| 198 |
+
This may be useful for file types where the metadata is stored in the header,
|
| 199 |
+
but is randomly accessed.
|
| 200 |
+
"""
|
| 201 |
+
|
| 202 |
+
name = "first"
|
| 203 |
+
|
| 204 |
+
def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
|
| 205 |
+
super().__init__(blocksize, fetcher, size)
|
| 206 |
+
self.cache: bytes | None = None
|
| 207 |
+
|
| 208 |
+
def _fetch(self, start: int | None, end: int | None) -> bytes:
|
| 209 |
+
start = start or 0
|
| 210 |
+
end = end or self.size
|
| 211 |
+
if start < self.blocksize:
|
| 212 |
+
if self.cache is None:
|
| 213 |
+
if end > self.blocksize:
|
| 214 |
+
data = self.fetcher(0, end)
|
| 215 |
+
self.cache = data[: self.blocksize]
|
| 216 |
+
return data[start:]
|
| 217 |
+
self.cache = self.fetcher(0, self.blocksize)
|
| 218 |
+
part = self.cache[start:end]
|
| 219 |
+
if end > self.blocksize:
|
| 220 |
+
part += self.fetcher(self.blocksize, end)
|
| 221 |
+
return part
|
| 222 |
+
else:
|
| 223 |
+
return self.fetcher(start, end)
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
class BlockCache(BaseCache):
|
| 227 |
+
"""
|
| 228 |
+
Cache holding memory as a set of blocks.
|
| 229 |
+
|
| 230 |
+
Requests are only ever made ``blocksize`` at a time, and are
|
| 231 |
+
stored in an LRU cache. The least recently accessed block is
|
| 232 |
+
discarded when more than ``maxblocks`` are stored.
|
| 233 |
+
|
| 234 |
+
Parameters
|
| 235 |
+
----------
|
| 236 |
+
blocksize : int
|
| 237 |
+
The number of bytes to store in each block.
|
| 238 |
+
Requests are only ever made for ``blocksize``, so this
|
| 239 |
+
should balance the overhead of making a request against
|
| 240 |
+
the granularity of the blocks.
|
| 241 |
+
fetcher : Callable
|
| 242 |
+
size : int
|
| 243 |
+
The total size of the file being cached.
|
| 244 |
+
maxblocks : int
|
| 245 |
+
The maximum number of blocks to cache for. The maximum memory
|
| 246 |
+
use for this cache is then ``blocksize * maxblocks``.
|
| 247 |
+
"""
|
| 248 |
+
|
| 249 |
+
name = "blockcache"
|
| 250 |
+
|
| 251 |
+
def __init__(
|
| 252 |
+
self, blocksize: int, fetcher: Fetcher, size: int, maxblocks: int = 32
|
| 253 |
+
) -> None:
|
| 254 |
+
super().__init__(blocksize, fetcher, size)
|
| 255 |
+
self.nblocks = math.ceil(size / blocksize)
|
| 256 |
+
self.maxblocks = maxblocks
|
| 257 |
+
self._fetch_block_cached = functools.lru_cache(maxblocks)(self._fetch_block)
|
| 258 |
+
|
| 259 |
+
def __repr__(self) -> str:
|
| 260 |
+
return (
|
| 261 |
+
f"<BlockCache blocksize={self.blocksize}, "
|
| 262 |
+
f"size={self.size}, nblocks={self.nblocks}>"
|
| 263 |
+
)
|
| 264 |
+
|
| 265 |
+
def cache_info(self):
|
| 266 |
+
"""
|
| 267 |
+
The statistics on the block cache.
|
| 268 |
+
|
| 269 |
+
Returns
|
| 270 |
+
-------
|
| 271 |
+
NamedTuple
|
| 272 |
+
Returned directly from the LRU Cache used internally.
|
| 273 |
+
"""
|
| 274 |
+
return self._fetch_block_cached.cache_info()
|
| 275 |
+
|
| 276 |
+
def __getstate__(self) -> dict[str, Any]:
|
| 277 |
+
state = self.__dict__
|
| 278 |
+
del state["_fetch_block_cached"]
|
| 279 |
+
return state
|
| 280 |
+
|
| 281 |
+
def __setstate__(self, state: dict[str, Any]) -> None:
|
| 282 |
+
self.__dict__.update(state)
|
| 283 |
+
self._fetch_block_cached = functools.lru_cache(state["maxblocks"])(
|
| 284 |
+
self._fetch_block
|
| 285 |
+
)
|
| 286 |
+
|
| 287 |
+
def _fetch(self, start: int | None, end: int | None) -> bytes:
|
| 288 |
+
if start is None:
|
| 289 |
+
start = 0
|
| 290 |
+
if end is None:
|
| 291 |
+
end = self.size
|
| 292 |
+
if start >= self.size or start >= end:
|
| 293 |
+
return b""
|
| 294 |
+
|
| 295 |
+
# byte position -> block numbers
|
| 296 |
+
start_block_number = start // self.blocksize
|
| 297 |
+
end_block_number = end // self.blocksize
|
| 298 |
+
|
| 299 |
+
# these are cached, so safe to do multiple calls for the same start and end.
|
| 300 |
+
for block_number in range(start_block_number, end_block_number + 1):
|
| 301 |
+
self._fetch_block_cached(block_number)
|
| 302 |
+
|
| 303 |
+
return self._read_cache(
|
| 304 |
+
start,
|
| 305 |
+
end,
|
| 306 |
+
start_block_number=start_block_number,
|
| 307 |
+
end_block_number=end_block_number,
|
| 308 |
+
)
|
| 309 |
+
|
| 310 |
+
def _fetch_block(self, block_number: int) -> bytes:
|
| 311 |
+
"""
|
| 312 |
+
Fetch the block of data for `block_number`.
|
| 313 |
+
"""
|
| 314 |
+
if block_number > self.nblocks:
|
| 315 |
+
raise ValueError(
|
| 316 |
+
f"'block_number={block_number}' is greater than "
|
| 317 |
+
f"the number of blocks ({self.nblocks})"
|
| 318 |
+
)
|
| 319 |
+
|
| 320 |
+
start = block_number * self.blocksize
|
| 321 |
+
end = start + self.blocksize
|
| 322 |
+
logger.info("BlockCache fetching block %d", block_number)
|
| 323 |
+
block_contents = super()._fetch(start, end)
|
| 324 |
+
return block_contents
|
| 325 |
+
|
| 326 |
+
def _read_cache(
|
| 327 |
+
self, start: int, end: int, start_block_number: int, end_block_number: int
|
| 328 |
+
) -> bytes:
|
| 329 |
+
"""
|
| 330 |
+
Read from our block cache.
|
| 331 |
+
|
| 332 |
+
Parameters
|
| 333 |
+
----------
|
| 334 |
+
start, end : int
|
| 335 |
+
The start and end byte positions.
|
| 336 |
+
start_block_number, end_block_number : int
|
| 337 |
+
The start and end block numbers.
|
| 338 |
+
"""
|
| 339 |
+
start_pos = start % self.blocksize
|
| 340 |
+
end_pos = end % self.blocksize
|
| 341 |
+
|
| 342 |
+
if start_block_number == end_block_number:
|
| 343 |
+
block: bytes = self._fetch_block_cached(start_block_number)
|
| 344 |
+
return block[start_pos:end_pos]
|
| 345 |
+
|
| 346 |
+
else:
|
| 347 |
+
# read from the initial
|
| 348 |
+
out = []
|
| 349 |
+
out.append(self._fetch_block_cached(start_block_number)[start_pos:])
|
| 350 |
+
|
| 351 |
+
# intermediate blocks
|
| 352 |
+
# Note: it'd be nice to combine these into one big request. However
|
| 353 |
+
# that doesn't play nicely with our LRU cache.
|
| 354 |
+
for block_number in range(start_block_number + 1, end_block_number):
|
| 355 |
+
out.append(self._fetch_block_cached(block_number))
|
| 356 |
+
|
| 357 |
+
# final block
|
| 358 |
+
out.append(self._fetch_block_cached(end_block_number)[:end_pos])
|
| 359 |
+
|
| 360 |
+
return b"".join(out)
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
class BytesCache(BaseCache):
|
| 364 |
+
"""Cache which holds data in a in-memory bytes object
|
| 365 |
+
|
| 366 |
+
Implements read-ahead by the block size, for semi-random reads progressing
|
| 367 |
+
through the file.
|
| 368 |
+
|
| 369 |
+
Parameters
|
| 370 |
+
----------
|
| 371 |
+
trim: bool
|
| 372 |
+
As we read more data, whether to discard the start of the buffer when
|
| 373 |
+
we are more than a blocksize ahead of it.
|
| 374 |
+
"""
|
| 375 |
+
|
| 376 |
+
name: ClassVar[str] = "bytes"
|
| 377 |
+
|
| 378 |
+
def __init__(
|
| 379 |
+
self, blocksize: int, fetcher: Fetcher, size: int, trim: bool = True
|
| 380 |
+
) -> None:
|
| 381 |
+
super().__init__(blocksize, fetcher, size)
|
| 382 |
+
self.cache = b""
|
| 383 |
+
self.start: int | None = None
|
| 384 |
+
self.end: int | None = None
|
| 385 |
+
self.trim = trim
|
| 386 |
+
|
| 387 |
+
def _fetch(self, start: int | None, end: int | None) -> bytes:
|
| 388 |
+
# TODO: only set start/end after fetch, in case it fails?
|
| 389 |
+
# is this where retry logic might go?
|
| 390 |
+
if start is None:
|
| 391 |
+
start = 0
|
| 392 |
+
if end is None:
|
| 393 |
+
end = self.size
|
| 394 |
+
if start >= self.size or start >= end:
|
| 395 |
+
return b""
|
| 396 |
+
if (
|
| 397 |
+
self.start is not None
|
| 398 |
+
and start >= self.start
|
| 399 |
+
and self.end is not None
|
| 400 |
+
and end < self.end
|
| 401 |
+
):
|
| 402 |
+
# cache hit: we have all the required data
|
| 403 |
+
offset = start - self.start
|
| 404 |
+
return self.cache[offset : offset + end - start]
|
| 405 |
+
|
| 406 |
+
if self.blocksize:
|
| 407 |
+
bend = min(self.size, end + self.blocksize)
|
| 408 |
+
else:
|
| 409 |
+
bend = end
|
| 410 |
+
|
| 411 |
+
if bend == start or start > self.size:
|
| 412 |
+
return b""
|
| 413 |
+
|
| 414 |
+
if (self.start is None or start < self.start) and (
|
| 415 |
+
self.end is None or end > self.end
|
| 416 |
+
):
|
| 417 |
+
# First read, or extending both before and after
|
| 418 |
+
self.cache = self.fetcher(start, bend)
|
| 419 |
+
self.start = start
|
| 420 |
+
else:
|
| 421 |
+
assert self.start is not None
|
| 422 |
+
assert self.end is not None
|
| 423 |
+
|
| 424 |
+
if start < self.start:
|
| 425 |
+
if self.end is None or self.end - end > self.blocksize:
|
| 426 |
+
self.cache = self.fetcher(start, bend)
|
| 427 |
+
self.start = start
|
| 428 |
+
else:
|
| 429 |
+
new = self.fetcher(start, self.start)
|
| 430 |
+
self.start = start
|
| 431 |
+
self.cache = new + self.cache
|
| 432 |
+
elif self.end is not None and bend > self.end:
|
| 433 |
+
if self.end > self.size:
|
| 434 |
+
pass
|
| 435 |
+
elif end - self.end > self.blocksize:
|
| 436 |
+
self.cache = self.fetcher(start, bend)
|
| 437 |
+
self.start = start
|
| 438 |
+
else:
|
| 439 |
+
new = self.fetcher(self.end, bend)
|
| 440 |
+
self.cache = self.cache + new
|
| 441 |
+
|
| 442 |
+
self.end = self.start + len(self.cache)
|
| 443 |
+
offset = start - self.start
|
| 444 |
+
out = self.cache[offset : offset + end - start]
|
| 445 |
+
if self.trim:
|
| 446 |
+
num = (self.end - self.start) // (self.blocksize + 1)
|
| 447 |
+
if num > 1:
|
| 448 |
+
self.start += self.blocksize * num
|
| 449 |
+
self.cache = self.cache[self.blocksize * num :]
|
| 450 |
+
return out
|
| 451 |
+
|
| 452 |
+
def __len__(self) -> int:
|
| 453 |
+
return len(self.cache)
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
class AllBytes(BaseCache):
|
| 457 |
+
"""Cache entire contents of the file"""
|
| 458 |
+
|
| 459 |
+
name: ClassVar[str] = "all"
|
| 460 |
+
|
| 461 |
+
def __init__(
|
| 462 |
+
self,
|
| 463 |
+
blocksize: int | None = None,
|
| 464 |
+
fetcher: Fetcher | None = None,
|
| 465 |
+
size: int | None = None,
|
| 466 |
+
data: bytes | None = None,
|
| 467 |
+
) -> None:
|
| 468 |
+
super().__init__(blocksize, fetcher, size) # type: ignore[arg-type]
|
| 469 |
+
if data is None:
|
| 470 |
+
data = self.fetcher(0, self.size)
|
| 471 |
+
self.data = data
|
| 472 |
+
|
| 473 |
+
def _fetch(self, start: int | None, stop: int | None) -> bytes:
|
| 474 |
+
return self.data[start:stop]
|
| 475 |
+
|
| 476 |
+
|
| 477 |
+
class KnownPartsOfAFile(BaseCache):
    """
    Cache holding known file parts.

    Parameters
    ----------
    blocksize: int
        How far to read ahead in numbers of bytes
    fetcher: func
        Function of the form f(start, end) which gets bytes from remote as
        specified
    size: int
        How big this file is
    data: dict
        A dictionary mapping explicit `(start, stop)` file-offset tuples
        with known bytes. Defaults to no known parts. The mapping is
        copied internally, so the caller's dict is never mutated.
    strict: bool, default True
        Whether to fetch reads that go beyond a known byte-range boundary.
        If `False`, any read that ends outside a known part will be zero
        padded. Note that zero padding will not be used for reads that
        begin outside a known byte-range.
    """

    name: ClassVar[str] = "parts"

    def __init__(
        self,
        blocksize: int,
        fetcher: Fetcher,
        size: int,
        data: dict[tuple[int, int], bytes] | None = None,
        strict: bool = True,
        **_: Any,
    ):
        super().__init__(blocksize, fetcher, size)
        self.strict = strict

        # simple consolidation of contiguous blocks
        if data:
            # Work on a private copy: the previous implementation used a
            # mutable default argument (`data={}`) and popped entries out
            # of the caller's dict, mutating caller state and aliasing the
            # shared default object.
            data = dict(data)
            old_offsets = sorted(data.keys())
            offsets = [old_offsets[0]]
            blocks = [data.pop(old_offsets[0])]
            for start, stop in old_offsets[1:]:
                start0, stop0 = offsets[-1]
                if start == stop0:
                    # Contiguous with the previous range: merge into one block.
                    offsets[-1] = (start0, stop)
                    blocks[-1] += data.pop((start, stop))
                else:
                    offsets.append((start, stop))
                    blocks.append(data.pop((start, stop)))

            self.data = dict(zip(offsets, blocks))
        else:
            # No known parts: start with a fresh, per-instance dict.
            self.data = {}

    def _fetch(self, start: int | None, stop: int | None) -> bytes:
        """Return bytes [start, stop), served from the known parts when possible.

        Falls back to ``self.fetcher`` (with a performance warning) for any
        portion outside the known parts; raises ValueError if no fetcher is
        available for such a read.
        """
        if start is None:
            start = 0
        if stop is None:
            stop = self.size

        out = b""
        for (loc0, loc1), data in self.data.items():
            # If self.strict=False, use zero-padded data
            # for reads beyond the end of a "known" buffer
            if loc0 <= start < loc1:
                off = start - loc0
                out = data[off : off + stop - start]
                if not self.strict or loc0 <= stop <= loc1:
                    # The request is within a known range, or
                    # it begins within a known range, and we
                    # are allowed to pad reads beyond the
                    # buffer with zero
                    out += b"\x00" * (stop - start - len(out))
                    return out
                else:
                    # The request ends outside a known range,
                    # and we are being "strict" about reads
                    # beyond the buffer
                    start = loc1
                    break

        # We only get here if there is a request outside the
        # known parts of the file. In an ideal world, this
        # should never happen
        if self.fetcher is None:
            # We cannot fetch the data, so raise an error
            raise ValueError(f"Read is outside the known file parts: {(start, stop)}. ")
        # We can fetch the data, but should warn the user
        # that this may be slow
        warnings.warn(
            f"Read is outside the known file parts: {(start, stop)}. "
            f"IO/caching performance may be poor!"
        )
        logger.debug(f"KnownPartsOfAFile cache fetching {start}-{stop}")
        return out + super()._fetch(start, stop)
|
| 573 |
+
|
| 574 |
+
|
| 575 |
+
class UpdatableLRU(Generic[P, T]):
    """
    Custom LRU cache that also allows entries to be inserted directly.

    Used by BackgroundBlockCache, so that results computed by the
    background thread can be pushed into the same cache the foreground
    calls read from.
    """

    class CacheInfo(NamedTuple):
        # Same shape as functools.lru_cache's cache_info() result.
        hits: int
        misses: int
        maxsize: int
        currsize: int

    def __init__(self, func: Callable[P, T], max_size: int = 128) -> None:
        self._func = func
        self._max_size = max_size
        self._cache: OrderedDict[Any, T] = collections.OrderedDict()
        self._hits = 0
        self._misses = 0
        self._lock = threading.Lock()

    def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T:
        if kwargs:
            raise TypeError(f"Got unexpected keyword argument {kwargs.keys()}")
        with self._lock:
            try:
                value = self._cache[args]
            except KeyError:
                pass
            else:
                # Hit: refresh recency and return without calling func.
                self._cache.move_to_end(args)
                self._hits += 1
                return value

        # Miss: compute outside the lock so slow fetches do not
        # serialize unrelated callers.
        value = self._func(*args, **kwargs)

        with self._lock:
            self._cache[args] = value
            self._misses += 1
            while len(self._cache) > self._max_size:
                # Evict the least recently used entry.
                self._cache.popitem(last=False)

        return value

    def is_key_cached(self, *args: Any) -> bool:
        """Return True if a value for ``args`` is present (no computation)."""
        with self._lock:
            return args in self._cache

    def add_key(self, result: T, *args: Any) -> None:
        """Insert or overwrite the cached value for ``args`` directly."""
        with self._lock:
            self._cache[args] = result
            while len(self._cache) > self._max_size:
                self._cache.popitem(last=False)

    def cache_info(self) -> UpdatableLRU.CacheInfo:
        """Return a consistent snapshot of the hit/miss counters and size."""
        with self._lock:
            return self.CacheInfo(
                hits=self._hits,
                misses=self._misses,
                maxsize=self._max_size,
                currsize=len(self._cache),
            )
|
| 633 |
+
|
| 634 |
+
|
| 635 |
+
class BackgroundBlockCache(BaseCache):
    """
    Cache holding memory as a set of blocks with pre-loading of
    the next block in the background.

    Requests are only ever made ``blocksize`` at a time, and are
    stored in an LRU cache. The least recently accessed block is
    discarded when more than ``maxblocks`` are stored. If the
    next block is not in cache, it is loaded in a separate thread
    in non-blocking way.

    Parameters
    ----------
    blocksize : int
        The number of bytes to store in each block.
        Requests are only ever made for ``blocksize``, so this
        should balance the overhead of making a request against
        the granularity of the blocks.
    fetcher : Callable
    size : int
        The total size of the file being cached.
    maxblocks : int
        The maximum number of blocks to cache for. The maximum memory
        use for this cache is then ``blocksize * maxblocks``.
    """

    name: ClassVar[str] = "background"

    def __init__(
        self, blocksize: int, fetcher: Fetcher, size: int, maxblocks: int = 32
    ) -> None:
        super().__init__(blocksize, fetcher, size)
        self.nblocks = math.ceil(size / blocksize)
        self.maxblocks = maxblocks
        self._fetch_block_cached = UpdatableLRU(self._fetch_block, maxblocks)

        # Single worker so that at most one speculative fetch is in flight.
        self._thread_executor = ThreadPoolExecutor(max_workers=1)
        self._fetch_future_block_number: int | None = None
        self._fetch_future: Future[bytes] | None = None
        self._fetch_future_lock = threading.Lock()

    def __repr__(self) -> str:
        return (
            f"<BackgroundBlockCache blocksize={self.blocksize}, "
            f"size={self.size}, nblocks={self.nblocks}>"
        )

    def cache_info(self) -> UpdatableLRU.CacheInfo:
        """
        The statistics on the block cache.

        Returns
        -------
        NamedTuple
            Returned directly from the LRU Cache used internally.
        """
        return self._fetch_block_cached.cache_info()

    def __getstate__(self) -> dict[str, Any]:
        # BUGFIX: operate on a *copy* of __dict__. The previous code did
        # `state = self.__dict__`, so the `del`s below removed the
        # executor, cache and lock from the live instance, breaking the
        # object as a side effect of pickling it.
        state = self.__dict__.copy()
        del state["_fetch_block_cached"]
        del state["_thread_executor"]
        del state["_fetch_future_block_number"]
        del state["_fetch_future"]
        del state["_fetch_future_lock"]
        return state

    def __setstate__(self, state) -> None:
        # Recreate the unpicklable members removed in __getstate__.
        self.__dict__.update(state)
        self._fetch_block_cached = UpdatableLRU(self._fetch_block, state["maxblocks"])
        self._thread_executor = ThreadPoolExecutor(max_workers=1)
        self._fetch_future_block_number = None
        self._fetch_future = None
        self._fetch_future_lock = threading.Lock()

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        """Return bytes [start, end), scheduling a read-ahead of the next block."""
        if start is None:
            start = 0
        if end is None:
            end = self.size
        if start >= self.size or start >= end:
            return b""

        # byte position -> block numbers
        start_block_number = start // self.blocksize
        end_block_number = end // self.blocksize

        fetch_future_block_number = None
        fetch_future = None
        with self._fetch_future_lock:
            # Background thread is running. Check whether we can or must join it.
            if self._fetch_future is not None:
                assert self._fetch_future_block_number is not None
                if self._fetch_future.done():
                    logger.info("BlockCache joined background fetch without waiting.")
                    self._fetch_block_cached.add_key(
                        self._fetch_future.result(), self._fetch_future_block_number
                    )
                    # Cleanup the fetch variables. Done with fetching the block.
                    self._fetch_future_block_number = None
                    self._fetch_future = None
                else:
                    # Must join if we need the block for the current fetch
                    must_join = bool(
                        start_block_number
                        <= self._fetch_future_block_number
                        <= end_block_number
                    )
                    if must_join:
                        # Copy to the local variables to release lock
                        # before waiting for result
                        fetch_future_block_number = self._fetch_future_block_number
                        fetch_future = self._fetch_future

                        # Cleanup the fetch variables. Have a local copy.
                        self._fetch_future_block_number = None
                        self._fetch_future = None

        # Need to wait for the future for the current read
        if fetch_future is not None:
            logger.info("BlockCache waiting for background fetch.")
            # Wait until result and put it in cache
            self._fetch_block_cached.add_key(
                fetch_future.result(), fetch_future_block_number
            )

        # these are cached, so safe to do multiple calls for the same start and end.
        for block_number in range(start_block_number, end_block_number + 1):
            self._fetch_block_cached(block_number)

        # fetch next block in the background if nothing is running in the background,
        # the block is within file and it is not already cached
        end_block_plus_1 = end_block_number + 1
        with self._fetch_future_lock:
            if (
                self._fetch_future is None
                and end_block_plus_1 <= self.nblocks
                and not self._fetch_block_cached.is_key_cached(end_block_plus_1)
            ):
                self._fetch_future_block_number = end_block_plus_1
                self._fetch_future = self._thread_executor.submit(
                    self._fetch_block, end_block_plus_1, "async"
                )

        return self._read_cache(
            start,
            end,
            start_block_number=start_block_number,
            end_block_number=end_block_number,
        )

    def _fetch_block(self, block_number: int, log_info: str = "sync") -> bytes:
        """
        Fetch the block of data for `block_number`.
        """
        if block_number > self.nblocks:
            raise ValueError(
                f"'block_number={block_number}' is greater than "
                f"the number of blocks ({self.nblocks})"
            )

        start = block_number * self.blocksize
        end = start + self.blocksize
        logger.info("BlockCache fetching block (%s) %d", log_info, block_number)
        block_contents = super()._fetch(start, end)
        return block_contents

    def _read_cache(
        self, start: int, end: int, start_block_number: int, end_block_number: int
    ) -> bytes:
        """
        Read from our block cache.

        Parameters
        ----------
        start, end : int
            The start and end byte positions.
        start_block_number, end_block_number : int
            The start and end block numbers.
        """
        start_pos = start % self.blocksize
        end_pos = end % self.blocksize

        if start_block_number == end_block_number:
            block = self._fetch_block_cached(start_block_number)
            return block[start_pos:end_pos]

        else:
            # read from the initial
            out = []
            out.append(self._fetch_block_cached(start_block_number)[start_pos:])

            # intermediate blocks
            # Note: it'd be nice to combine these into one big request. However
            # that doesn't play nicely with our LRU cache.
            for block_number in range(start_block_number + 1, end_block_number):
                out.append(self._fetch_block_cached(block_number))

            # final block
            out.append(self._fetch_block_cached(end_block_number)[:end_pos])

            return b"".join(out)
|
| 837 |
+
|
| 838 |
+
|
| 839 |
+
# Registry of available cache implementations, keyed by each class's
# ``name`` attribute; populated by register_cache() below.
caches: dict[str | None, type[BaseCache]] = {
    # one custom case
    None: BaseCache,
}
|
| 843 |
+
|
| 844 |
+
|
| 845 |
+
def register_cache(cls: type[BaseCache], clobber: bool = False) -> None:
    """Add a cache implementation to the ``caches`` registry.

    The class is registered under its ``name`` class attribute.

    Parameters
    ----------
    clobber: bool, optional
        If set to True (default is False) - allow to overwrite existing
        entry.

    Raises
    ------
    ValueError
        If the name is already registered and ``clobber`` is False.
    """
    name = cls.name
    if name in caches and not clobber:
        raise ValueError(f"Cache with name {name!r} is already known: {caches[name]}")
    caches[name] = cls
|
| 862 |
+
|
| 863 |
+
|
| 864 |
+
# Register every built-in cache implementation under its ``name`` attribute.
for c in (
    BaseCache,
    MMapCache,
    BytesCache,
    ReadAheadCache,
    BlockCache,
    FirstChunkCache,
    AllBytes,
    KnownPartsOfAFile,
    BackgroundBlockCache,
):
    register_cache(c)
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/exceptions.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
fsspec user-defined exception classes
|
| 3 |
+
"""
|
| 4 |
+
import asyncio
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class BlocksizeMismatchError(ValueError):
    """
    Raised when a cached file is opened with a different blocksize than it
    was originally written with.
    """
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class FSTimeoutError(asyncio.TimeoutError):
    """
    Raised when an fsspec operation times out.
    """
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (10.2 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/common.cpython-310.pyc
ADDED
|
Binary file (1.57 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/copy.cpython-310.pyc
ADDED
|
Binary file (10.4 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/get.cpython-310.pyc
ADDED
|
Binary file (10.2 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/put.cpython-310.pyc
ADDED
|
Binary file (10.8 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/fsspec/tests/abstract/copy.py
ADDED
|
@@ -0,0 +1,557 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from hashlib import md5
|
| 2 |
+
from itertools import product
|
| 3 |
+
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
from fsspec.tests.abstract.common import GLOB_EDGE_CASES_TESTS
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class AbstractCopyTests:
|
| 10 |
+
def test_copy_file_to_existing_directory(
    self,
    fs,
    fs_join,
    fs_bulk_operations_scenario_0,
    fs_target,
    supports_empty_directories,
):
    """Copy single files into an existing directory, with and without a
    trailing slash on the target (scenario 1a)."""
    # Copy scenario 1a
    source = fs_bulk_operations_scenario_0

    target = fs_target
    fs.mkdir(target)
    if not supports_empty_directories:
        # Force target directory to exist by adding a dummy file
        fs.touch(fs_join(target, "dummy"))
    assert fs.isdir(target)

    target_file2 = fs_join(target, "file2")
    target_subfile1 = fs_join(target, "subfile1")

    # Copy from source directory
    fs.cp(fs_join(source, "file2"), target)
    assert fs.isfile(target_file2)

    # Copy from sub directory
    fs.cp(fs_join(source, "subdir", "subfile1"), target)
    assert fs.isfile(target_subfile1)

    # Remove copied files
    fs.rm([target_file2, target_subfile1])
    assert not fs.exists(target_file2)
    assert not fs.exists(target_subfile1)

    # Repeat with trailing slash on target
    fs.cp(fs_join(source, "file2"), target + "/")
    assert fs.isdir(target)
    assert fs.isfile(target_file2)

    fs.cp(fs_join(source, "subdir", "subfile1"), target + "/")
    assert fs.isfile(target_subfile1)
|
| 51 |
+
|
| 52 |
+
def test_copy_file_to_new_directory(
    self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target
):
    """Copy a single file to a not-yet-existing directory, indicated by a
    trailing slash (scenario 1b)."""
    # Copy scenario 1b
    source = fs_bulk_operations_scenario_0

    target = fs_target
    fs.mkdir(target)

    fs.cp(
        fs_join(source, "subdir", "subfile1"), fs_join(target, "newdir/")
    )  # Note trailing slash
    assert fs.isdir(target)
    assert fs.isdir(fs_join(target, "newdir"))
    assert fs.isfile(fs_join(target, "newdir", "subfile1"))
|
| 67 |
+
|
| 68 |
+
def test_copy_file_to_file_in_existing_directory(
    self,
    fs,
    fs_join,
    fs_bulk_operations_scenario_0,
    fs_target,
    supports_empty_directories,
):
    """Copy a single file onto an explicit new filename inside an existing
    directory (scenario 1c)."""
    # Copy scenario 1c
    source = fs_bulk_operations_scenario_0

    target = fs_target
    fs.mkdir(target)
    if not supports_empty_directories:
        # Force target directory to exist by adding a dummy file
        fs.touch(fs_join(target, "dummy"))
    assert fs.isdir(target)

    fs.cp(fs_join(source, "subdir", "subfile1"), fs_join(target, "newfile"))
    assert fs.isfile(fs_join(target, "newfile"))
|
| 88 |
+
|
| 89 |
+
def test_copy_file_to_file_in_new_directory(
    self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target
):
    """Copy a single file onto an explicit filename whose parent directory
    does not yet exist (scenario 1d)."""
    # Copy scenario 1d
    source = fs_bulk_operations_scenario_0

    target = fs_target
    fs.mkdir(target)

    fs.cp(
        fs_join(source, "subdir", "subfile1"), fs_join(target, "newdir", "newfile")
    )
    assert fs.isdir(fs_join(target, "newdir"))
    assert fs.isfile(fs_join(target, "newdir", "newfile"))
|
| 103 |
+
|
| 104 |
+
def test_copy_directory_to_existing_directory(
    self,
    fs,
    fs_join,
    fs_bulk_operations_scenario_0,
    fs_target,
    supports_empty_directories,
):
    """Copy a directory into an existing directory: non-recursive is a
    no-op; recursive copies contents (source trailing slash) or the whole
    subdir (no slash); maxdepth limits nesting (scenario 1e)."""
    # Copy scenario 1e
    source = fs_bulk_operations_scenario_0

    target = fs_target
    fs.mkdir(target)
    if not supports_empty_directories:
        # Force target directory to exist by adding a dummy file
        dummy = fs_join(target, "dummy")
        fs.touch(dummy)
    assert fs.isdir(target)

    # NOTE(review): zip pairs only (False, False) and (True, True); the
    # mixed slash combinations are intentionally not exercised here.
    for source_slash, target_slash in zip([False, True], [False, True]):
        s = fs_join(source, "subdir")
        if source_slash:
            s += "/"
        t = target + "/" if target_slash else target

        # Without recursive does nothing
        fs.cp(s, t)
        assert fs.ls(target, detail=False) == (
            [] if supports_empty_directories else [dummy]
        )

        # With recursive
        fs.cp(s, t, recursive=True)
        if source_slash:
            assert fs.isfile(fs_join(target, "subfile1"))
            assert fs.isfile(fs_join(target, "subfile2"))
            assert fs.isdir(fs_join(target, "nesteddir"))
            assert fs.isfile(fs_join(target, "nesteddir", "nestedfile"))
            assert not fs.exists(fs_join(target, "subdir"))

            fs.rm(
                [
                    fs_join(target, "subfile1"),
                    fs_join(target, "subfile2"),
                    fs_join(target, "nesteddir"),
                ],
                recursive=True,
            )
        else:
            assert fs.isdir(fs_join(target, "subdir"))
            assert fs.isfile(fs_join(target, "subdir", "subfile1"))
            assert fs.isfile(fs_join(target, "subdir", "subfile2"))
            assert fs.isdir(fs_join(target, "subdir", "nesteddir"))
            assert fs.isfile(fs_join(target, "subdir", "nesteddir", "nestedfile"))

            fs.rm(fs_join(target, "subdir"), recursive=True)
        assert fs.ls(target, detail=False) == (
            [] if supports_empty_directories else [dummy]
        )

        # Limit recursive by maxdepth
        fs.cp(s, t, recursive=True, maxdepth=1)
        if source_slash:
            assert fs.isfile(fs_join(target, "subfile1"))
            assert fs.isfile(fs_join(target, "subfile2"))
            assert not fs.exists(fs_join(target, "nesteddir"))
            assert not fs.exists(fs_join(target, "subdir"))

            fs.rm(
                [
                    fs_join(target, "subfile1"),
                    fs_join(target, "subfile2"),
                ],
                recursive=True,
            )
        else:
            assert fs.isdir(fs_join(target, "subdir"))
            assert fs.isfile(fs_join(target, "subdir", "subfile1"))
            assert fs.isfile(fs_join(target, "subdir", "subfile2"))
            assert not fs.exists(fs_join(target, "subdir", "nesteddir"))

            fs.rm(fs_join(target, "subdir"), recursive=True)
        assert fs.ls(target, detail=False) == (
            [] if supports_empty_directories else [dummy]
        )
|
| 189 |
+
|
| 190 |
+
def test_copy_directory_to_new_directory(
    self,
    fs,
    fs_join,
    fs_bulk_operations_scenario_0,
    fs_target,
    supports_empty_directories,
):
    """Copy a directory to a not-yet-existing directory: non-recursive is
    a no-op; recursive creates the new directory with the source's
    contents; maxdepth limits nesting (scenario 1f)."""
    # Copy scenario 1f
    source = fs_bulk_operations_scenario_0

    target = fs_target
    fs.mkdir(target)

    # NOTE(review): zip pairs only (False, False) and (True, True); the
    # mixed slash combinations are intentionally not exercised here.
    for source_slash, target_slash in zip([False, True], [False, True]):
        s = fs_join(source, "subdir")
        if source_slash:
            s += "/"
        t = fs_join(target, "newdir")
        if target_slash:
            t += "/"

        # Without recursive does nothing
        fs.cp(s, t)
        if supports_empty_directories:
            assert fs.ls(target) == []
        else:
            with pytest.raises(FileNotFoundError):
                fs.ls(target)

        # With recursive
        fs.cp(s, t, recursive=True)
        assert fs.isdir(fs_join(target, "newdir"))
        assert fs.isfile(fs_join(target, "newdir", "subfile1"))
        assert fs.isfile(fs_join(target, "newdir", "subfile2"))
        assert fs.isdir(fs_join(target, "newdir", "nesteddir"))
        assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile"))
        assert not fs.exists(fs_join(target, "subdir"))

        fs.rm(fs_join(target, "newdir"), recursive=True)
        assert not fs.exists(fs_join(target, "newdir"))

        # Limit recursive by maxdepth
        fs.cp(s, t, recursive=True, maxdepth=1)
        assert fs.isdir(fs_join(target, "newdir"))
        assert fs.isfile(fs_join(target, "newdir", "subfile1"))
        assert fs.isfile(fs_join(target, "newdir", "subfile2"))
        assert not fs.exists(fs_join(target, "newdir", "nesteddir"))
        assert not fs.exists(fs_join(target, "subdir"))

        fs.rm(fs_join(target, "newdir"), recursive=True)
        assert not fs.exists(fs_join(target, "newdir"))
|
| 242 |
+
|
| 243 |
+
def test_copy_glob_to_existing_directory(
    self,
    fs,
    fs_join,
    fs_bulk_operations_scenario_0,
    fs_target,
    supports_empty_directories,
):
    """Copy glob patterns ("*" and "**") into an existing directory,
    checking non-recursive, recursive, and maxdepth behavior
    (scenario 1g)."""
    # Copy scenario 1g
    source = fs_bulk_operations_scenario_0

    target = fs_target
    fs.mkdir(target)
    if not supports_empty_directories:
        # Force target directory to exist by adding a dummy file
        dummy = fs_join(target, "dummy")
        fs.touch(dummy)
    assert fs.isdir(target)

    for target_slash in [False, True]:
        t = target + "/" if target_slash else target

        # Without recursive
        fs.cp(fs_join(source, "subdir", "*"), t)
        assert fs.isfile(fs_join(target, "subfile1"))
        assert fs.isfile(fs_join(target, "subfile2"))
        assert not fs.isdir(fs_join(target, "nesteddir"))
        assert not fs.exists(fs_join(target, "nesteddir", "nestedfile"))
        assert not fs.exists(fs_join(target, "subdir"))

        fs.rm(
            [
                fs_join(target, "subfile1"),
                fs_join(target, "subfile2"),
            ],
            recursive=True,
        )
        assert fs.ls(target, detail=False) == (
            [] if supports_empty_directories else [dummy]
        )

        # With recursive
        # "*" needs recursive=True to descend; "**" already matches
        # nested paths, so recursive=False there.
        for glob, recursive in zip(["*", "**"], [True, False]):
            fs.cp(fs_join(source, "subdir", glob), t, recursive=recursive)
            assert fs.isfile(fs_join(target, "subfile1"))
            assert fs.isfile(fs_join(target, "subfile2"))
            assert fs.isdir(fs_join(target, "nesteddir"))
            assert fs.isfile(fs_join(target, "nesteddir", "nestedfile"))
            assert not fs.exists(fs_join(target, "subdir"))

            fs.rm(
                [
                    fs_join(target, "subfile1"),
                    fs_join(target, "subfile2"),
                    fs_join(target, "nesteddir"),
                ],
                recursive=True,
            )
            assert fs.ls(target, detail=False) == (
                [] if supports_empty_directories else [dummy]
            )

            # Limit recursive by maxdepth
            fs.cp(
                fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1
            )
            assert fs.isfile(fs_join(target, "subfile1"))
            assert fs.isfile(fs_join(target, "subfile2"))
            assert not fs.exists(fs_join(target, "nesteddir"))
            assert not fs.exists(fs_join(target, "subdir"))

            fs.rm(
                [
                    fs_join(target, "subfile1"),
                    fs_join(target, "subfile2"),
                ],
                recursive=True,
            )
            assert fs.ls(target, detail=False) == (
                [] if supports_empty_directories else [dummy]
            )
|
| 324 |
+
|
| 325 |
+
def test_copy_glob_to_new_directory(
|
| 326 |
+
self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target
|
| 327 |
+
):
|
| 328 |
+
# Copy scenario 1h
|
| 329 |
+
source = fs_bulk_operations_scenario_0
|
| 330 |
+
|
| 331 |
+
target = fs_target
|
| 332 |
+
fs.mkdir(target)
|
| 333 |
+
|
| 334 |
+
for target_slash in [False, True]:
|
| 335 |
+
t = fs_join(target, "newdir")
|
| 336 |
+
if target_slash:
|
| 337 |
+
t += "/"
|
| 338 |
+
|
| 339 |
+
# Without recursive
|
| 340 |
+
fs.cp(fs_join(source, "subdir", "*"), t)
|
| 341 |
+
assert fs.isdir(fs_join(target, "newdir"))
|
| 342 |
+
assert fs.isfile(fs_join(target, "newdir", "subfile1"))
|
| 343 |
+
assert fs.isfile(fs_join(target, "newdir", "subfile2"))
|
| 344 |
+
assert not fs.exists(fs_join(target, "newdir", "nesteddir"))
|
| 345 |
+
assert not fs.exists(fs_join(target, "newdir", "nesteddir", "nestedfile"))
|
| 346 |
+
assert not fs.exists(fs_join(target, "subdir"))
|
| 347 |
+
assert not fs.exists(fs_join(target, "newdir", "subdir"))
|
| 348 |
+
|
| 349 |
+
fs.rm(fs_join(target, "newdir"), recursive=True)
|
| 350 |
+
assert not fs.exists(fs_join(target, "newdir"))
|
| 351 |
+
|
| 352 |
+
# With recursive
|
| 353 |
+
for glob, recursive in zip(["*", "**"], [True, False]):
|
| 354 |
+
fs.cp(fs_join(source, "subdir", glob), t, recursive=recursive)
|
| 355 |
+
assert fs.isdir(fs_join(target, "newdir"))
|
| 356 |
+
assert fs.isfile(fs_join(target, "newdir", "subfile1"))
|
| 357 |
+
assert fs.isfile(fs_join(target, "newdir", "subfile2"))
|
| 358 |
+
assert fs.isdir(fs_join(target, "newdir", "nesteddir"))
|
| 359 |
+
assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile"))
|
| 360 |
+
assert not fs.exists(fs_join(target, "subdir"))
|
| 361 |
+
assert not fs.exists(fs_join(target, "newdir", "subdir"))
|
| 362 |
+
|
| 363 |
+
fs.rm(fs_join(target, "newdir"), recursive=True)
|
| 364 |
+
assert not fs.exists(fs_join(target, "newdir"))
|
| 365 |
+
|
| 366 |
+
# Limit recursive by maxdepth
|
| 367 |
+
fs.cp(
|
| 368 |
+
fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1
|
| 369 |
+
)
|
| 370 |
+
assert fs.isdir(fs_join(target, "newdir"))
|
| 371 |
+
assert fs.isfile(fs_join(target, "newdir", "subfile1"))
|
| 372 |
+
assert fs.isfile(fs_join(target, "newdir", "subfile2"))
|
| 373 |
+
assert not fs.exists(fs_join(target, "newdir", "nesteddir"))
|
| 374 |
+
assert not fs.exists(fs_join(target, "subdir"))
|
| 375 |
+
assert not fs.exists(fs_join(target, "newdir", "subdir"))
|
| 376 |
+
|
| 377 |
+
fs.rm(fs_join(target, "newdir"), recursive=True)
|
| 378 |
+
assert not fs.exists(fs_join(target, "newdir"))
|
| 379 |
+
|
| 380 |
+
@pytest.mark.parametrize(
|
| 381 |
+
GLOB_EDGE_CASES_TESTS["argnames"],
|
| 382 |
+
GLOB_EDGE_CASES_TESTS["argvalues"],
|
| 383 |
+
)
|
| 384 |
+
def test_copy_glob_edge_cases(
|
| 385 |
+
self,
|
| 386 |
+
path,
|
| 387 |
+
recursive,
|
| 388 |
+
maxdepth,
|
| 389 |
+
expected,
|
| 390 |
+
fs,
|
| 391 |
+
fs_join,
|
| 392 |
+
fs_glob_edge_cases_files,
|
| 393 |
+
fs_target,
|
| 394 |
+
fs_sanitize_path,
|
| 395 |
+
):
|
| 396 |
+
# Copy scenario 1g
|
| 397 |
+
source = fs_glob_edge_cases_files
|
| 398 |
+
|
| 399 |
+
target = fs_target
|
| 400 |
+
|
| 401 |
+
for new_dir, target_slash in product([True, False], [True, False]):
|
| 402 |
+
fs.mkdir(target)
|
| 403 |
+
|
| 404 |
+
t = fs_join(target, "newdir") if new_dir else target
|
| 405 |
+
t = t + "/" if target_slash else t
|
| 406 |
+
|
| 407 |
+
fs.copy(fs_join(source, path), t, recursive=recursive, maxdepth=maxdepth)
|
| 408 |
+
|
| 409 |
+
output = fs.find(target)
|
| 410 |
+
if new_dir:
|
| 411 |
+
prefixed_expected = [
|
| 412 |
+
fs_sanitize_path(fs_join(target, "newdir", p)) for p in expected
|
| 413 |
+
]
|
| 414 |
+
else:
|
| 415 |
+
prefixed_expected = [
|
| 416 |
+
fs_sanitize_path(fs_join(target, p)) for p in expected
|
| 417 |
+
]
|
| 418 |
+
assert sorted(output) == sorted(prefixed_expected)
|
| 419 |
+
|
| 420 |
+
try:
|
| 421 |
+
fs.rm(target, recursive=True)
|
| 422 |
+
except FileNotFoundError:
|
| 423 |
+
pass
|
| 424 |
+
|
| 425 |
+
def test_copy_list_of_files_to_existing_directory(
|
| 426 |
+
self,
|
| 427 |
+
fs,
|
| 428 |
+
fs_join,
|
| 429 |
+
fs_bulk_operations_scenario_0,
|
| 430 |
+
fs_target,
|
| 431 |
+
supports_empty_directories,
|
| 432 |
+
):
|
| 433 |
+
# Copy scenario 2a
|
| 434 |
+
source = fs_bulk_operations_scenario_0
|
| 435 |
+
|
| 436 |
+
target = fs_target
|
| 437 |
+
fs.mkdir(target)
|
| 438 |
+
if not supports_empty_directories:
|
| 439 |
+
# Force target directory to exist by adding a dummy file
|
| 440 |
+
dummy = fs_join(target, "dummy")
|
| 441 |
+
fs.touch(dummy)
|
| 442 |
+
assert fs.isdir(target)
|
| 443 |
+
|
| 444 |
+
source_files = [
|
| 445 |
+
fs_join(source, "file1"),
|
| 446 |
+
fs_join(source, "file2"),
|
| 447 |
+
fs_join(source, "subdir", "subfile1"),
|
| 448 |
+
]
|
| 449 |
+
|
| 450 |
+
for target_slash in [False, True]:
|
| 451 |
+
t = target + "/" if target_slash else target
|
| 452 |
+
|
| 453 |
+
fs.cp(source_files, t)
|
| 454 |
+
assert fs.isfile(fs_join(target, "file1"))
|
| 455 |
+
assert fs.isfile(fs_join(target, "file2"))
|
| 456 |
+
assert fs.isfile(fs_join(target, "subfile1"))
|
| 457 |
+
|
| 458 |
+
fs.rm(
|
| 459 |
+
[
|
| 460 |
+
fs_join(target, "file1"),
|
| 461 |
+
fs_join(target, "file2"),
|
| 462 |
+
fs_join(target, "subfile1"),
|
| 463 |
+
],
|
| 464 |
+
recursive=True,
|
| 465 |
+
)
|
| 466 |
+
assert fs.ls(target, detail=False) == (
|
| 467 |
+
[] if supports_empty_directories else [dummy]
|
| 468 |
+
)
|
| 469 |
+
|
| 470 |
+
def test_copy_list_of_files_to_new_directory(
|
| 471 |
+
self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target
|
| 472 |
+
):
|
| 473 |
+
# Copy scenario 2b
|
| 474 |
+
source = fs_bulk_operations_scenario_0
|
| 475 |
+
|
| 476 |
+
target = fs_target
|
| 477 |
+
fs.mkdir(target)
|
| 478 |
+
|
| 479 |
+
source_files = [
|
| 480 |
+
fs_join(source, "file1"),
|
| 481 |
+
fs_join(source, "file2"),
|
| 482 |
+
fs_join(source, "subdir", "subfile1"),
|
| 483 |
+
]
|
| 484 |
+
|
| 485 |
+
fs.cp(source_files, fs_join(target, "newdir") + "/") # Note trailing slash
|
| 486 |
+
assert fs.isdir(fs_join(target, "newdir"))
|
| 487 |
+
assert fs.isfile(fs_join(target, "newdir", "file1"))
|
| 488 |
+
assert fs.isfile(fs_join(target, "newdir", "file2"))
|
| 489 |
+
assert fs.isfile(fs_join(target, "newdir", "subfile1"))
|
| 490 |
+
|
| 491 |
+
def test_copy_two_files_new_directory(
|
| 492 |
+
self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target
|
| 493 |
+
):
|
| 494 |
+
# This is a duplicate of test_copy_list_of_files_to_new_directory and
|
| 495 |
+
# can eventually be removed.
|
| 496 |
+
source = fs_bulk_operations_scenario_0
|
| 497 |
+
|
| 498 |
+
target = fs_target
|
| 499 |
+
assert not fs.exists(target)
|
| 500 |
+
fs.cp([fs_join(source, "file1"), fs_join(source, "file2")], target)
|
| 501 |
+
|
| 502 |
+
assert fs.isdir(target)
|
| 503 |
+
assert fs.isfile(fs_join(target, "file1"))
|
| 504 |
+
assert fs.isfile(fs_join(target, "file2"))
|
| 505 |
+
|
| 506 |
+
def test_copy_directory_without_files_with_same_name_prefix(
|
| 507 |
+
self,
|
| 508 |
+
fs,
|
| 509 |
+
fs_join,
|
| 510 |
+
fs_target,
|
| 511 |
+
fs_dir_and_file_with_same_name_prefix,
|
| 512 |
+
supports_empty_directories,
|
| 513 |
+
):
|
| 514 |
+
# Create the test dirs
|
| 515 |
+
source = fs_dir_and_file_with_same_name_prefix
|
| 516 |
+
target = fs_target
|
| 517 |
+
|
| 518 |
+
# Test without glob
|
| 519 |
+
fs.cp(fs_join(source, "subdir"), target, recursive=True)
|
| 520 |
+
|
| 521 |
+
assert fs.isfile(fs_join(target, "subfile.txt"))
|
| 522 |
+
assert not fs.isfile(fs_join(target, "subdir.txt"))
|
| 523 |
+
|
| 524 |
+
fs.rm([fs_join(target, "subfile.txt")])
|
| 525 |
+
if supports_empty_directories:
|
| 526 |
+
assert fs.ls(target) == []
|
| 527 |
+
else:
|
| 528 |
+
assert not fs.exists(target)
|
| 529 |
+
|
| 530 |
+
# Test with glob
|
| 531 |
+
fs.cp(fs_join(source, "subdir*"), target, recursive=True)
|
| 532 |
+
|
| 533 |
+
assert fs.isdir(fs_join(target, "subdir"))
|
| 534 |
+
assert fs.isfile(fs_join(target, "subdir", "subfile.txt"))
|
| 535 |
+
assert fs.isfile(fs_join(target, "subdir.txt"))
|
| 536 |
+
|
| 537 |
+
def test_copy_with_source_and_destination_as_list(
|
| 538 |
+
self, fs, fs_target, fs_join, fs_10_files_with_hashed_names
|
| 539 |
+
):
|
| 540 |
+
# Create the test dir
|
| 541 |
+
source = fs_10_files_with_hashed_names
|
| 542 |
+
target = fs_target
|
| 543 |
+
|
| 544 |
+
# Create list of files for source and destination
|
| 545 |
+
source_files = []
|
| 546 |
+
destination_files = []
|
| 547 |
+
for i in range(10):
|
| 548 |
+
hashed_i = md5(str(i).encode("utf-8")).hexdigest()
|
| 549 |
+
source_files.append(fs_join(source, f"{hashed_i}.txt"))
|
| 550 |
+
destination_files.append(fs_join(target, f"{hashed_i}.txt"))
|
| 551 |
+
|
| 552 |
+
# Copy and assert order was kept
|
| 553 |
+
fs.copy(path1=source_files, path2=destination_files)
|
| 554 |
+
|
| 555 |
+
for i in range(10):
|
| 556 |
+
file_content = fs.cat(destination_files[i]).decode("utf-8")
|
| 557 |
+
assert file_content == str(i)
|
evalkit_internvl/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d6391e173add8cc0acb350a777ada09114763904575fec96a949326b78efcaec
|
| 3 |
+
size 125766
|
evalkit_internvl/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3a308f298f15b7a7bdac53aee8ac9ba65f05d48676e07678a0b7a002d4182686
|
| 3 |
+
size 132892
|
evalkit_internvl/lib/python3.10/site-packages/packaging/__init__.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is dual licensed under the terms of the Apache License, Version
|
| 2 |
+
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
| 3 |
+
# for complete details.
|
| 4 |
+
|
| 5 |
+
__title__ = "packaging"
|
| 6 |
+
__summary__ = "Core utilities for Python packages"
|
| 7 |
+
__uri__ = "https://github.com/pypa/packaging"
|
| 8 |
+
|
| 9 |
+
__version__ = "24.2"
|
| 10 |
+
|
| 11 |
+
__author__ = "Donald Stufft and individual contributors"
|
| 12 |
+
__email__ = "donald@stufft.io"
|
| 13 |
+
|
| 14 |
+
__license__ = "BSD-2-Clause or Apache-2.0"
|
| 15 |
+
__copyright__ = f"2014 {__author__}"
|
evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (501 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/_elffile.cpython-310.pyc
ADDED
|
Binary file (3.38 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/_manylinux.cpython-310.pyc
ADDED
|
Binary file (6.57 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/_musllinux.cpython-310.pyc
ADDED
|
Binary file (3.43 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/_parser.cpython-310.pyc
ADDED
|
Binary file (9.24 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/_structures.cpython-310.pyc
ADDED
|
Binary file (2.68 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/_tokenizer.cpython-310.pyc
ADDED
|
Binary file (5.9 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/markers.cpython-310.pyc
ADDED
|
Binary file (7.86 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/requirements.cpython-310.pyc
ADDED
|
Binary file (2.9 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/specifiers.cpython-310.pyc
ADDED
|
Binary file (31.4 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/tags.cpython-310.pyc
ADDED
|
Binary file (15.2 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (4.63 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/packaging/__pycache__/version.cpython-310.pyc
ADDED
|
Binary file (15 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/packaging/_elffile.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
ELF file parser.
|
| 3 |
+
|
| 4 |
+
This provides a class ``ELFFile`` that parses an ELF executable in a similar
|
| 5 |
+
interface to ``ZipFile``. Only the read interface is implemented.
|
| 6 |
+
|
| 7 |
+
Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
|
| 8 |
+
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from __future__ import annotations
|
| 12 |
+
|
| 13 |
+
import enum
|
| 14 |
+
import os
|
| 15 |
+
import struct
|
| 16 |
+
from typing import IO
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class ELFInvalid(ValueError):
|
| 20 |
+
pass
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class EIClass(enum.IntEnum):
|
| 24 |
+
C32 = 1
|
| 25 |
+
C64 = 2
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class EIData(enum.IntEnum):
|
| 29 |
+
Lsb = 1
|
| 30 |
+
Msb = 2
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class EMachine(enum.IntEnum):
|
| 34 |
+
I386 = 3
|
| 35 |
+
S390 = 22
|
| 36 |
+
Arm = 40
|
| 37 |
+
X8664 = 62
|
| 38 |
+
AArc64 = 183
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class ELFFile:
|
| 42 |
+
"""
|
| 43 |
+
Representation of an ELF executable.
|
| 44 |
+
"""
|
| 45 |
+
|
| 46 |
+
def __init__(self, f: IO[bytes]) -> None:
|
| 47 |
+
self._f = f
|
| 48 |
+
|
| 49 |
+
try:
|
| 50 |
+
ident = self._read("16B")
|
| 51 |
+
except struct.error as e:
|
| 52 |
+
raise ELFInvalid("unable to parse identification") from e
|
| 53 |
+
magic = bytes(ident[:4])
|
| 54 |
+
if magic != b"\x7fELF":
|
| 55 |
+
raise ELFInvalid(f"invalid magic: {magic!r}")
|
| 56 |
+
|
| 57 |
+
self.capacity = ident[4] # Format for program header (bitness).
|
| 58 |
+
self.encoding = ident[5] # Data structure encoding (endianness).
|
| 59 |
+
|
| 60 |
+
try:
|
| 61 |
+
# e_fmt: Format for program header.
|
| 62 |
+
# p_fmt: Format for section header.
|
| 63 |
+
# p_idx: Indexes to find p_type, p_offset, and p_filesz.
|
| 64 |
+
e_fmt, self._p_fmt, self._p_idx = {
|
| 65 |
+
(1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)), # 32-bit LSB.
|
| 66 |
+
(1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)), # 32-bit MSB.
|
| 67 |
+
(2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)), # 64-bit LSB.
|
| 68 |
+
(2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)), # 64-bit MSB.
|
| 69 |
+
}[(self.capacity, self.encoding)]
|
| 70 |
+
except KeyError as e:
|
| 71 |
+
raise ELFInvalid(
|
| 72 |
+
f"unrecognized capacity ({self.capacity}) or "
|
| 73 |
+
f"encoding ({self.encoding})"
|
| 74 |
+
) from e
|
| 75 |
+
|
| 76 |
+
try:
|
| 77 |
+
(
|
| 78 |
+
_,
|
| 79 |
+
self.machine, # Architecture type.
|
| 80 |
+
_,
|
| 81 |
+
_,
|
| 82 |
+
self._e_phoff, # Offset of program header.
|
| 83 |
+
_,
|
| 84 |
+
self.flags, # Processor-specific flags.
|
| 85 |
+
_,
|
| 86 |
+
self._e_phentsize, # Size of section.
|
| 87 |
+
self._e_phnum, # Number of sections.
|
| 88 |
+
) = self._read(e_fmt)
|
| 89 |
+
except struct.error as e:
|
| 90 |
+
raise ELFInvalid("unable to parse machine and section information") from e
|
| 91 |
+
|
| 92 |
+
def _read(self, fmt: str) -> tuple[int, ...]:
|
| 93 |
+
return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))
|
| 94 |
+
|
| 95 |
+
@property
|
| 96 |
+
def interpreter(self) -> str | None:
|
| 97 |
+
"""
|
| 98 |
+
The path recorded in the ``PT_INTERP`` section header.
|
| 99 |
+
"""
|
| 100 |
+
for index in range(self._e_phnum):
|
| 101 |
+
self._f.seek(self._e_phoff + self._e_phentsize * index)
|
| 102 |
+
try:
|
| 103 |
+
data = self._read(self._p_fmt)
|
| 104 |
+
except struct.error:
|
| 105 |
+
continue
|
| 106 |
+
if data[self._p_idx[0]] != 3: # Not PT_INTERP.
|
| 107 |
+
continue
|
| 108 |
+
self._f.seek(data[self._p_idx[1]])
|
| 109 |
+
return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0")
|
| 110 |
+
return None
|
evalkit_internvl/lib/python3.10/site-packages/packaging/_manylinux.py
ADDED
|
@@ -0,0 +1,263 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import collections
|
| 4 |
+
import contextlib
|
| 5 |
+
import functools
|
| 6 |
+
import os
|
| 7 |
+
import re
|
| 8 |
+
import sys
|
| 9 |
+
import warnings
|
| 10 |
+
from typing import Generator, Iterator, NamedTuple, Sequence
|
| 11 |
+
|
| 12 |
+
from ._elffile import EIClass, EIData, ELFFile, EMachine
|
| 13 |
+
|
| 14 |
+
EF_ARM_ABIMASK = 0xFF000000
|
| 15 |
+
EF_ARM_ABI_VER5 = 0x05000000
|
| 16 |
+
EF_ARM_ABI_FLOAT_HARD = 0x00000400
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# `os.PathLike` not a generic type until Python 3.9, so sticking with `str`
|
| 20 |
+
# as the type for `path` until then.
|
| 21 |
+
@contextlib.contextmanager
|
| 22 |
+
def _parse_elf(path: str) -> Generator[ELFFile | None, None, None]:
|
| 23 |
+
try:
|
| 24 |
+
with open(path, "rb") as f:
|
| 25 |
+
yield ELFFile(f)
|
| 26 |
+
except (OSError, TypeError, ValueError):
|
| 27 |
+
yield None
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def _is_linux_armhf(executable: str) -> bool:
|
| 31 |
+
# hard-float ABI can be detected from the ELF header of the running
|
| 32 |
+
# process
|
| 33 |
+
# https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
|
| 34 |
+
with _parse_elf(executable) as f:
|
| 35 |
+
return (
|
| 36 |
+
f is not None
|
| 37 |
+
and f.capacity == EIClass.C32
|
| 38 |
+
and f.encoding == EIData.Lsb
|
| 39 |
+
and f.machine == EMachine.Arm
|
| 40 |
+
and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5
|
| 41 |
+
and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def _is_linux_i686(executable: str) -> bool:
|
| 46 |
+
with _parse_elf(executable) as f:
|
| 47 |
+
return (
|
| 48 |
+
f is not None
|
| 49 |
+
and f.capacity == EIClass.C32
|
| 50 |
+
and f.encoding == EIData.Lsb
|
| 51 |
+
and f.machine == EMachine.I386
|
| 52 |
+
)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def _have_compatible_abi(executable: str, archs: Sequence[str]) -> bool:
|
| 56 |
+
if "armv7l" in archs:
|
| 57 |
+
return _is_linux_armhf(executable)
|
| 58 |
+
if "i686" in archs:
|
| 59 |
+
return _is_linux_i686(executable)
|
| 60 |
+
allowed_archs = {
|
| 61 |
+
"x86_64",
|
| 62 |
+
"aarch64",
|
| 63 |
+
"ppc64",
|
| 64 |
+
"ppc64le",
|
| 65 |
+
"s390x",
|
| 66 |
+
"loongarch64",
|
| 67 |
+
"riscv64",
|
| 68 |
+
}
|
| 69 |
+
return any(arch in allowed_archs for arch in archs)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
# If glibc ever changes its major version, we need to know what the last
|
| 73 |
+
# minor version was, so we can build the complete list of all versions.
|
| 74 |
+
# For now, guess what the highest minor version might be, assume it will
|
| 75 |
+
# be 50 for testing. Once this actually happens, update the dictionary
|
| 76 |
+
# with the actual value.
|
| 77 |
+
_LAST_GLIBC_MINOR: dict[int, int] = collections.defaultdict(lambda: 50)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
class _GLibCVersion(NamedTuple):
|
| 81 |
+
major: int
|
| 82 |
+
minor: int
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def _glibc_version_string_confstr() -> str | None:
|
| 86 |
+
"""
|
| 87 |
+
Primary implementation of glibc_version_string using os.confstr.
|
| 88 |
+
"""
|
| 89 |
+
# os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
|
| 90 |
+
# to be broken or missing. This strategy is used in the standard library
|
| 91 |
+
# platform module.
|
| 92 |
+
# https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
|
| 93 |
+
try:
|
| 94 |
+
# Should be a string like "glibc 2.17".
|
| 95 |
+
version_string: str | None = os.confstr("CS_GNU_LIBC_VERSION")
|
| 96 |
+
assert version_string is not None
|
| 97 |
+
_, version = version_string.rsplit()
|
| 98 |
+
except (AssertionError, AttributeError, OSError, ValueError):
|
| 99 |
+
# os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
|
| 100 |
+
return None
|
| 101 |
+
return version
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def _glibc_version_string_ctypes() -> str | None:
|
| 105 |
+
"""
|
| 106 |
+
Fallback implementation of glibc_version_string using ctypes.
|
| 107 |
+
"""
|
| 108 |
+
try:
|
| 109 |
+
import ctypes
|
| 110 |
+
except ImportError:
|
| 111 |
+
return None
|
| 112 |
+
|
| 113 |
+
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
|
| 114 |
+
# manpage says, "If filename is NULL, then the returned handle is for the
|
| 115 |
+
# main program". This way we can let the linker do the work to figure out
|
| 116 |
+
# which libc our process is actually using.
|
| 117 |
+
#
|
| 118 |
+
# We must also handle the special case where the executable is not a
|
| 119 |
+
# dynamically linked executable. This can occur when using musl libc,
|
| 120 |
+
# for example. In this situation, dlopen() will error, leading to an
|
| 121 |
+
# OSError. Interestingly, at least in the case of musl, there is no
|
| 122 |
+
# errno set on the OSError. The single string argument used to construct
|
| 123 |
+
# OSError comes from libc itself and is therefore not portable to
|
| 124 |
+
# hard code here. In any case, failure to call dlopen() means we
|
| 125 |
+
# can proceed, so we bail on our attempt.
|
| 126 |
+
try:
|
| 127 |
+
process_namespace = ctypes.CDLL(None)
|
| 128 |
+
except OSError:
|
| 129 |
+
return None
|
| 130 |
+
|
| 131 |
+
try:
|
| 132 |
+
gnu_get_libc_version = process_namespace.gnu_get_libc_version
|
| 133 |
+
except AttributeError:
|
| 134 |
+
# Symbol doesn't exist -> therefore, we are not linked to
|
| 135 |
+
# glibc.
|
| 136 |
+
return None
|
| 137 |
+
|
| 138 |
+
# Call gnu_get_libc_version, which returns a string like "2.5"
|
| 139 |
+
gnu_get_libc_version.restype = ctypes.c_char_p
|
| 140 |
+
version_str: str = gnu_get_libc_version()
|
| 141 |
+
# py2 / py3 compatibility:
|
| 142 |
+
if not isinstance(version_str, str):
|
| 143 |
+
version_str = version_str.decode("ascii")
|
| 144 |
+
|
| 145 |
+
return version_str
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def _glibc_version_string() -> str | None:
|
| 149 |
+
"""Returns glibc version string, or None if not using glibc."""
|
| 150 |
+
return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def _parse_glibc_version(version_str: str) -> tuple[int, int]:
|
| 154 |
+
"""Parse glibc version.
|
| 155 |
+
|
| 156 |
+
We use a regexp instead of str.split because we want to discard any
|
| 157 |
+
random junk that might come after the minor version -- this might happen
|
| 158 |
+
in patched/forked versions of glibc (e.g. Linaro's version of glibc
|
| 159 |
+
uses version strings like "2.20-2014.11"). See gh-3588.
|
| 160 |
+
"""
|
| 161 |
+
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
|
| 162 |
+
if not m:
|
| 163 |
+
warnings.warn(
|
| 164 |
+
f"Expected glibc version with 2 components major.minor,"
|
| 165 |
+
f" got: {version_str}",
|
| 166 |
+
RuntimeWarning,
|
| 167 |
+
stacklevel=2,
|
| 168 |
+
)
|
| 169 |
+
return -1, -1
|
| 170 |
+
return int(m.group("major")), int(m.group("minor"))
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
@functools.lru_cache
|
| 174 |
+
def _get_glibc_version() -> tuple[int, int]:
|
| 175 |
+
version_str = _glibc_version_string()
|
| 176 |
+
if version_str is None:
|
| 177 |
+
return (-1, -1)
|
| 178 |
+
return _parse_glibc_version(version_str)
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
# From PEP 513, PEP 600
|
| 182 |
+
def _is_compatible(arch: str, version: _GLibCVersion) -> bool:
|
| 183 |
+
sys_glibc = _get_glibc_version()
|
| 184 |
+
if sys_glibc < version:
|
| 185 |
+
return False
|
| 186 |
+
# Check for presence of _manylinux module.
|
| 187 |
+
try:
|
| 188 |
+
import _manylinux
|
| 189 |
+
except ImportError:
|
| 190 |
+
return True
|
| 191 |
+
if hasattr(_manylinux, "manylinux_compatible"):
|
| 192 |
+
result = _manylinux.manylinux_compatible(version[0], version[1], arch)
|
| 193 |
+
if result is not None:
|
| 194 |
+
return bool(result)
|
| 195 |
+
return True
|
| 196 |
+
if version == _GLibCVersion(2, 5):
|
| 197 |
+
if hasattr(_manylinux, "manylinux1_compatible"):
|
| 198 |
+
return bool(_manylinux.manylinux1_compatible)
|
| 199 |
+
if version == _GLibCVersion(2, 12):
|
| 200 |
+
if hasattr(_manylinux, "manylinux2010_compatible"):
|
| 201 |
+
return bool(_manylinux.manylinux2010_compatible)
|
| 202 |
+
if version == _GLibCVersion(2, 17):
|
| 203 |
+
if hasattr(_manylinux, "manylinux2014_compatible"):
|
| 204 |
+
return bool(_manylinux.manylinux2014_compatible)
|
| 205 |
+
return True
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
_LEGACY_MANYLINUX_MAP = {
|
| 209 |
+
# CentOS 7 w/ glibc 2.17 (PEP 599)
|
| 210 |
+
(2, 17): "manylinux2014",
|
| 211 |
+
# CentOS 6 w/ glibc 2.12 (PEP 571)
|
| 212 |
+
(2, 12): "manylinux2010",
|
| 213 |
+
# CentOS 5 w/ glibc 2.5 (PEP 513)
|
| 214 |
+
(2, 5): "manylinux1",
|
| 215 |
+
}
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def platform_tags(archs: Sequence[str]) -> Iterator[str]:
|
| 219 |
+
"""Generate manylinux tags compatible to the current platform.
|
| 220 |
+
|
| 221 |
+
:param archs: Sequence of compatible architectures.
|
| 222 |
+
The first one shall be the closest to the actual architecture and be the part of
|
| 223 |
+
platform tag after the ``linux_`` prefix, e.g. ``x86_64``.
|
| 224 |
+
The ``linux_`` prefix is assumed as a prerequisite for the current platform to
|
| 225 |
+
be manylinux-compatible.
|
| 226 |
+
|
| 227 |
+
:returns: An iterator of compatible manylinux tags.
|
| 228 |
+
"""
|
| 229 |
+
if not _have_compatible_abi(sys.executable, archs):
|
| 230 |
+
return
|
| 231 |
+
# Oldest glibc to be supported regardless of architecture is (2, 17).
|
| 232 |
+
too_old_glibc2 = _GLibCVersion(2, 16)
|
| 233 |
+
if set(archs) & {"x86_64", "i686"}:
|
| 234 |
+
# On x86/i686 also oldest glibc to be supported is (2, 5).
|
| 235 |
+
too_old_glibc2 = _GLibCVersion(2, 4)
|
| 236 |
+
current_glibc = _GLibCVersion(*_get_glibc_version())
|
| 237 |
+
glibc_max_list = [current_glibc]
|
| 238 |
+
# We can assume compatibility across glibc major versions.
|
| 239 |
+
# https://sourceware.org/bugzilla/show_bug.cgi?id=24636
|
| 240 |
+
#
|
| 241 |
+
# Build a list of maximum glibc versions so that we can
|
| 242 |
+
# output the canonical list of all glibc from current_glibc
|
| 243 |
+
# down to too_old_glibc2, including all intermediary versions.
|
| 244 |
+
for glibc_major in range(current_glibc.major - 1, 1, -1):
|
| 245 |
+
glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
|
| 246 |
+
glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
|
| 247 |
+
for arch in archs:
|
| 248 |
+
for glibc_max in glibc_max_list:
|
| 249 |
+
if glibc_max.major == too_old_glibc2.major:
|
| 250 |
+
min_minor = too_old_glibc2.minor
|
| 251 |
+
else:
|
| 252 |
+
# For other glibc major versions oldest supported is (x, 0).
|
| 253 |
+
min_minor = -1
|
| 254 |
+
for glibc_minor in range(glibc_max.minor, min_minor, -1):
|
| 255 |
+
glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
|
| 256 |
+
tag = "manylinux_{}_{}".format(*glibc_version)
|
| 257 |
+
if _is_compatible(arch, glibc_version):
|
| 258 |
+
yield f"{tag}_{arch}"
|
| 259 |
+
# Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
|
| 260 |
+
if glibc_version in _LEGACY_MANYLINUX_MAP:
|
| 261 |
+
legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
|
| 262 |
+
if _is_compatible(arch, glibc_version):
|
| 263 |
+
yield f"{legacy_tag}_{arch}"
|
evalkit_internvl/lib/python3.10/site-packages/packaging/_musllinux.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""PEP 656 support.
|
| 2 |
+
|
| 3 |
+
This module implements logic to detect if the currently running Python is
|
| 4 |
+
linked against musl, and what musl version is used.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
import functools
|
| 10 |
+
import re
|
| 11 |
+
import subprocess
|
| 12 |
+
import sys
|
| 13 |
+
from typing import Iterator, NamedTuple, Sequence
|
| 14 |
+
|
| 15 |
+
from ._elffile import ELFFile
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class _MuslVersion(NamedTuple):
|
| 19 |
+
major: int
|
| 20 |
+
minor: int
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def _parse_musl_version(output: str) -> _MuslVersion | None:
|
| 24 |
+
lines = [n for n in (n.strip() for n in output.splitlines()) if n]
|
| 25 |
+
if len(lines) < 2 or lines[0][:4] != "musl":
|
| 26 |
+
return None
|
| 27 |
+
m = re.match(r"Version (\d+)\.(\d+)", lines[1])
|
| 28 |
+
if not m:
|
| 29 |
+
return None
|
| 30 |
+
return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@functools.lru_cache
def _get_musl_version(executable: str) -> _MuslVersion | None:
    """Detect currently-running musl runtime version.

    This is done by checking the specified executable's dynamic linking
    information, and invoking the loader to parse its output for a version
    string. If the loader is musl, the output would be something like::

        musl libc (x86_64)
        Version 1.2.2
        Dynamic Program Loader

    Returns ``None`` when the executable is not a readable ELF file or is
    not dynamically linked against musl.
    """
    try:
        with open(executable, "rb") as f:
            ld = ELFFile(f).interpreter
    except (OSError, TypeError, ValueError):
        # Unreadable path, or not a valid ELF file: treat as "not musl".
        return None
    if ld is None or "musl" not in ld:
        # Statically linked, or linked against a non-musl loader.
        return None
    # Running the musl loader with no arguments prints its version banner
    # to stderr; parse the version out of that text.
    proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True)
    return _parse_musl_version(proc.stderr)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def platform_tags(archs: Sequence[str]) -> Iterator[str]:
    """Generate musllinux tags compatible to the current platform.

    :param archs: Sequence of compatible architectures.
        The first one shall be the closest to the actual architecture and be the part of
        platform tag after the ``linux_`` prefix, e.g. ``x86_64``.
        The ``linux_`` prefix is assumed as a prerequisite for the current platform to
        be musllinux-compatible.

    :returns: An iterator of compatible musllinux tags.
    """
    sys_musl = _get_musl_version(sys.executable)
    if sys_musl is None:
        # Python not dynamically linked against musl: no musllinux tags apply.
        return
    for arch in archs:
        # Count down from the running musl minor to 0: wheels built against
        # any older minor of the same major remain compatible.
        minor = sys_musl.minor
        while minor >= 0:
            yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
            minor -= 1
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
if __name__ == "__main__":  # pragma: no cover
    import sysconfig

    plat = sysconfig.get_platform()
    assert plat.startswith("linux-"), "not linux"

    print("plat:", plat)
    print("musl:", _get_musl_version(sys.executable))
    print("tags:", end=" ")
    # platform_tags() takes a *sequence* of architecture strings.  Passing the
    # bare str would iterate it character-by-character and emit one bogus tag
    # per character, so wrap the single normalized arch in a list.
    arch = re.sub(r"[.-]", "_", plat.split("-", 1)[-1])
    for t in platform_tags([arch]):
        print(t, end="\n ")
|
evalkit_internvl/lib/python3.10/site-packages/packaging/_parser.py
ADDED
|
@@ -0,0 +1,354 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Handwritten parser of dependency specifiers.
|
| 2 |
+
|
| 3 |
+
The docstring for each __parse_* function contains EBNF-inspired grammar representing
|
| 4 |
+
the implementation.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
import ast
|
| 10 |
+
from typing import NamedTuple, Sequence, Tuple, Union
|
| 11 |
+
|
| 12 |
+
from ._tokenizer import DEFAULT_RULES, Tokenizer
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class Node:
    """Base class for nodes of a parsed marker expression.

    Holds the raw text *value*; subclasses decide how it is rendered back
    into marker syntax via :meth:`serialize`.
    """

    def __init__(self, value: str) -> None:
        self.value = value

    def __str__(self) -> str:
        return self.value

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}('{self}')>"

    def serialize(self) -> str:
        # Abstract: each subclass defines its own serialization.
        raise NotImplementedError
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class Variable(Node):
    """A marker environment variable, e.g. ``python_version``."""

    def serialize(self) -> str:
        # Variables are emitted unquoted.
        return str(self)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class Value(Node):
    """A literal string value appearing in a marker expression."""

    def serialize(self) -> str:
        # Values are re-quoted with double quotes when serialized.
        return f'"{self}"'
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class Op(Node):
    """A marker operator, e.g. ``==``, ``>=``, ``in`` or ``not in``."""

    def serialize(self) -> str:
        return str(self)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# Type aliases describing the parsed marker structure:
# - a marker item is a (variable-or-value, operator, variable-or-value) triple;
# - an atom is either such a triple or a nested (parenthesized) sub-expression;
# - a marker list alternates atoms with boolean-operator strings ("and"/"or").
MarkerVar = Union[Variable, Value]
MarkerItem = Tuple[MarkerVar, Op, MarkerVar]
MarkerAtom = Union[MarkerItem, Sequence["MarkerAtom"]]
MarkerList = Sequence[Union["MarkerList", MarkerAtom, str]]
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class ParsedRequirement(NamedTuple):
    """Structured result of parsing a dependency specifier."""

    # Distribution name as written in the specifier (not normalized here).
    name: str
    # Direct-reference URL ("name @ url"), or "" when absent.
    url: str
    # Requested extras; empty list when none were given.
    extras: list[str]
    # Raw version-specifier text (commas included), or "" when absent.
    specifier: str
    # Parsed environment marker, or None when no marker was given.
    marker: MarkerList | None
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
# --------------------------------------------------------------------------------------
|
| 59 |
+
# Recursive descent parser for dependency specifier
|
| 60 |
+
# --------------------------------------------------------------------------------------
|
| 61 |
+
def parse_requirement(source: str) -> ParsedRequirement:
    """Parse *source* as a dependency specifier.

    Raises the tokenizer's syntax error for malformed input.
    """
    return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
    """
    requirement = WS? IDENTIFIER WS? extras WS? requirement_details
    """
    tokenizer.consume("WS")

    name_token = tokenizer.expect(
        "IDENTIFIER", expected="package name at the start of dependency specifier"
    )
    name = name_token.text
    tokenizer.consume("WS")

    extras = _parse_extras(tokenizer)
    tokenizer.consume("WS")

    url, specifier, marker = _parse_requirement_details(tokenizer)
    # Anything left over after the details is trailing junk.
    tokenizer.expect("END", expected="end of dependency specifier")

    return ParsedRequirement(name, url, extras, specifier, marker)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def _parse_requirement_details(
    tokenizer: Tokenizer,
) -> tuple[str, str, MarkerList | None]:
    """
    requirement_details = AT URL (WS requirement_marker?)?
                        | specifier WS? (requirement_marker)?
    """

    specifier = ""
    url = ""
    marker = None

    if tokenizer.check("AT"):
        # Direct reference form: "name @ <url> [; marker]".
        tokenizer.read()
        tokenizer.consume("WS")

        url_start = tokenizer.position
        url = tokenizer.expect("URL", expected="URL after @").text
        if tokenizer.check("END", peek=True):
            return (url, specifier, marker)

        # A marker after a URL must be separated by whitespace; without it
        # the semicolon would have been lexed as part of the URL.
        tokenizer.expect("WS", expected="whitespace after URL")

        # The input might end after whitespace.
        if tokenizer.check("END", peek=True):
            return (url, specifier, marker)

        marker = _parse_requirement_marker(
            tokenizer, span_start=url_start, after="URL and whitespace"
        )
    else:
        # Version-specifier form: "name [extras] (specifiers) [; marker]".
        specifier_start = tokenizer.position
        specifier = _parse_specifier(tokenizer)
        tokenizer.consume("WS")

        if tokenizer.check("END", peek=True):
            return (url, specifier, marker)

        marker = _parse_requirement_marker(
            tokenizer,
            span_start=specifier_start,
            after=(
                "version specifier"
                if specifier
                else "name and no valid version specifier"
            ),
        )

    return (url, specifier, marker)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def _parse_requirement_marker(
    tokenizer: Tokenizer, *, span_start: int, after: str
) -> MarkerList:
    """
    requirement_marker = SEMICOLON marker WS?

    :param span_start: start of the preceding construct, so a missing
        semicolon is reported against the whole span, not a single char.
    :param after: human-readable description of what preceded the marker,
        interpolated into the error message.
    """

    if not tokenizer.check("SEMICOLON"):
        tokenizer.raise_syntax_error(
            f"Expected end or semicolon (after {after})",
            span_start=span_start,
        )
    tokenizer.read()

    marker = _parse_marker(tokenizer)
    tokenizer.consume("WS")

    return marker
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def _parse_extras(tokenizer: Tokenizer) -> list[str]:
    """
    extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?
    """
    # Extras are optional; absence yields an empty list.
    if not tokenizer.check("LEFT_BRACKET", peek=True):
        return []

    # enclosing_tokens consumes the bracket pair and produces a tailored
    # error when the closing bracket is missing.
    with tokenizer.enclosing_tokens(
        "LEFT_BRACKET",
        "RIGHT_BRACKET",
        around="extras",
    ):
        tokenizer.consume("WS")
        extras = _parse_extras_list(tokenizer)
        tokenizer.consume("WS")

    return extras
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def _parse_extras_list(tokenizer: Tokenizer) -> list[str]:
    """
    extras_list = identifier (wsp* ',' wsp* identifier)*
    """
    extras: list[str] = []

    if not tokenizer.check("IDENTIFIER"):
        # Empty brackets "[]" are allowed.
        return extras

    extras.append(tokenizer.read().text)

    while True:
        tokenizer.consume("WS")
        if tokenizer.check("IDENTIFIER", peek=True):
            # Two identifiers in a row ("foo bar"): the comma is missing.
            tokenizer.raise_syntax_error("Expected comma between extra names")
        elif not tokenizer.check("COMMA"):
            # No comma and no identifier: list is finished.
            break

        tokenizer.read()
        tokenizer.consume("WS")

        extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma")
        extras.append(extra_token.text)

    return extras
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def _parse_specifier(tokenizer: Tokenizer) -> str:
    """
    specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS
              | WS? version_many WS?
    """
    # NOTE(review): the grammar allows both parenthesized and bare forms and
    # the code always enters the context manager, so enclosing_tokens
    # presumably tolerates absent parentheses — confirm in _tokenizer.
    with tokenizer.enclosing_tokens(
        "LEFT_PARENTHESIS",
        "RIGHT_PARENTHESIS",
        around="version specifier",
    ):
        tokenizer.consume("WS")
        parsed_specifiers = _parse_version_many(tokenizer)
        tokenizer.consume("WS")

    return parsed_specifiers
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def _parse_version_many(tokenizer: Tokenizer) -> str:
    """
    version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)?
    """
    # The specifier text is accumulated verbatim (commas included) rather
    # than parsed into components here.
    parsed_specifiers = ""
    while tokenizer.check("SPECIFIER"):
        span_start = tokenizer.position
        parsed_specifiers += tokenizer.read().text
        # ".*" suffixes and local version labels are rejected here with a
        # targeted message instead of surfacing as a generic syntax error.
        if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True):
            tokenizer.raise_syntax_error(
                ".* suffix can only be used with `==` or `!=` operators",
                span_start=span_start,
                span_end=tokenizer.position + 1,
            )
        if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True):
            tokenizer.raise_syntax_error(
                "Local version label can only be used with `==` or `!=` operators",
                span_start=span_start,
                span_end=tokenizer.position,
            )
        tokenizer.consume("WS")
        if not tokenizer.check("COMMA"):
            break
        # Keep the comma in the reconstructed specifier string.
        parsed_specifiers += tokenizer.read().text
        tokenizer.consume("WS")

    return parsed_specifiers
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
# --------------------------------------------------------------------------------------
|
| 250 |
+
# Recursive descent parser for marker expression
|
| 251 |
+
# --------------------------------------------------------------------------------------
|
| 252 |
+
def parse_marker(source: str) -> MarkerList:
    """Parse *source* as a standalone environment-marker expression."""
    return _parse_full_marker(Tokenizer(source, rules=DEFAULT_RULES))
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList:
    """Parse a marker and require the input to be fully consumed."""
    retval = _parse_marker(tokenizer)
    tokenizer.expect("END", expected="end of marker expression")
    return retval
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def _parse_marker(tokenizer: Tokenizer) -> MarkerList:
    """
    marker = marker_atom (BOOLOP marker_atom)+
    """
    # Build a flat list alternating atoms with boolean-operator strings,
    # e.g. [atom, "and", atom, "or", atom].
    parts = [_parse_marker_atom(tokenizer)]
    while tokenizer.check("BOOLOP"):
        op_token = tokenizer.read()
        parts.append(op_token.text)
        parts.append(_parse_marker_atom(tokenizer))
    return parts
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:
    """
    marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS?
                | WS? marker_item WS?
    """

    tokenizer.consume("WS")
    if tokenizer.check("LEFT_PARENTHESIS", peek=True):
        # Parenthesized sub-expression: recurse into the full marker grammar.
        with tokenizer.enclosing_tokens(
            "LEFT_PARENTHESIS",
            "RIGHT_PARENTHESIS",
            around="marker expression",
        ):
            tokenizer.consume("WS")
            marker: MarkerAtom = _parse_marker(tokenizer)
            tokenizer.consume("WS")
    else:
        marker = _parse_marker_item(tokenizer)
    tokenizer.consume("WS")
    return marker
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem:
    """
    marker_item = WS? marker_var WS? marker_op WS? marker_var WS?

    A single comparison of the form ``<var> <op> <var>``.
    """
    tokenizer.consume("WS")
    marker_var_left = _parse_marker_var(tokenizer)
    tokenizer.consume("WS")
    marker_op = _parse_marker_op(tokenizer)
    tokenizer.consume("WS")
    marker_var_right = _parse_marker_var(tokenizer)
    tokenizer.consume("WS")
    return (marker_var_left, marker_op, marker_var_right)
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar:
    """
    marker_var = VARIABLE | QUOTED_STRING
    """
    if tokenizer.check("VARIABLE"):
        # Dots in variable names are normalized to underscores before lookup.
        return process_env_var(tokenizer.read().text.replace(".", "_"))
    elif tokenizer.check("QUOTED_STRING"):
        return process_python_str(tokenizer.read().text)
    else:
        tokenizer.raise_syntax_error(
            message="Expected a marker variable or quoted string"
        )
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
def process_env_var(env_var: str) -> Variable:
    """Wrap a marker variable name, canonicalizing known aliases.

    ``python_implementation`` is accepted as an alias for
    ``platform_python_implementation``; every other name passes through.
    """
    if env_var in {"platform_python_implementation", "python_implementation"}:
        env_var = "platform_python_implementation"
    return Variable(env_var)
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
def process_python_str(python_str: str) -> Value:
    """Evaluate a quoted marker literal and wrap it as a :class:`Value`.

    ``ast.literal_eval`` safely resolves quoting and escapes without
    executing arbitrary code.
    """
    return Value(str(ast.literal_eval(python_str)))
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
def _parse_marker_op(tokenizer: Tokenizer) -> Op:
    """
    marker_op = IN | NOT IN | OP
    """
    if tokenizer.check("IN"):
        tokenizer.read()
        return Op("in")
    elif tokenizer.check("NOT"):
        # "not" must be followed by whitespace and "in"; bare "not" is invalid.
        tokenizer.read()
        tokenizer.expect("WS", expected="whitespace after 'not'")
        tokenizer.expect("IN", expected="'in' after 'not'")
        return Op("not in")
    elif tokenizer.check("OP"):
        return Op(tokenizer.read().text)
    else:
        return tokenizer.raise_syntax_error(
            "Expected marker operator, one of "
            "<=, <, !=, ==, >=, >, ~=, ===, in, not in"
        )
|