Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +3 -0
- parrot/lib/libitm.so.1 +3 -0
- parrot/lib/libquadmath.so.0 +3 -0
- parrot/lib/python3.10/site-packages/git/__pycache__/config.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/git/__pycache__/diff.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/git/__pycache__/util.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/git/index/__init__.py +16 -0
- parrot/lib/python3.10/site-packages/git/index/__pycache__/typ.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/git/index/__pycache__/util.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/git/index/base.py +1518 -0
- parrot/lib/python3.10/site-packages/git/index/util.py +121 -0
- parrot/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc +3 -0
- parrot/lib/python3.10/site-packages/imageio/__init__.py +131 -0
- parrot/lib/python3.10/site-packages/imageio/__main__.py +169 -0
- parrot/lib/python3.10/site-packages/imageio/config/__init__.py +16 -0
- parrot/lib/python3.10/site-packages/imageio/config/__pycache__/__init__.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/config/__pycache__/extensions.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/config/__pycache__/plugins.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/config/extensions.py +2002 -0
- parrot/lib/python3.10/site-packages/imageio/config/extensions.pyi +24 -0
- parrot/lib/python3.10/site-packages/imageio/config/plugins.py +782 -0
- parrot/lib/python3.10/site-packages/imageio/config/plugins.pyi +28 -0
- parrot/lib/python3.10/site-packages/imageio/freeze.py +11 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__init__.py +103 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/_bsdf.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/_freeimage.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/example.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/freeimage.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/freeimagemulti.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/gdal.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/grab.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/lytro.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/opencv.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow_legacy.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillowmulti.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/rawpy.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/spe.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/swf.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/tifffile.cpython-310.pyc +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/_bsdf.py +915 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/_dicom.py +932 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/_freeimage.py +1312 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/_swf.py +897 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/_tifffile.py +0 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/bsdf.py +324 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/dicom.py +333 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/example.py +145 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/feisem.py +95 -0
- parrot/lib/python3.10/site-packages/imageio/plugins/ffmpeg.py +729 -0
.gitattributes
CHANGED
|
@@ -156,3 +156,6 @@ parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/internal_api.
|
|
| 156 |
parrot/lib/libgomp.so.1 filter=lfs diff=lfs merge=lfs -text
|
| 157 |
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 158 |
parrot/lib/libquadmath.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 156 |
parrot/lib/libgomp.so.1 filter=lfs diff=lfs merge=lfs -text
|
| 157 |
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 158 |
parrot/lib/libquadmath.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
|
| 159 |
+
parrot/lib/libquadmath.so.0 filter=lfs diff=lfs merge=lfs -text
|
| 160 |
+
parrot/lib/libitm.so.1 filter=lfs diff=lfs merge=lfs -text
|
| 161 |
+
parrot/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
parrot/lib/libitm.so.1
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:70a7a1a8352b39da726e026874f1854096cdd1c60e80ea5cf97a4e38055ea7c1
|
| 3 |
+
size 1018904
|
parrot/lib/libquadmath.so.0
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:10c6fadba4c2f6d77e836a50aadbd92e95b137a85eb01b1ca183b50d8f39a2c6
|
| 3 |
+
size 1009408
|
parrot/lib/python3.10/site-packages/git/__pycache__/config.cpython-310.pyc
ADDED
|
Binary file (27.4 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/git/__pycache__/diff.cpython-310.pyc
ADDED
|
Binary file (17.8 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/git/__pycache__/util.cpython-310.pyc
ADDED
|
Binary file (39.2 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/git/index/__init__.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This module is part of GitPython and is released under the
|
| 2 |
+
# 3-Clause BSD License: https://opensource.org/license/bsd-3-clause/
|
| 3 |
+
|
| 4 |
+
"""Initialize the index package."""
|
| 5 |
+
|
| 6 |
+
__all__ = [
|
| 7 |
+
"BaseIndexEntry",
|
| 8 |
+
"BlobFilter",
|
| 9 |
+
"CheckoutError",
|
| 10 |
+
"IndexEntry",
|
| 11 |
+
"IndexFile",
|
| 12 |
+
"StageType",
|
| 13 |
+
]
|
| 14 |
+
|
| 15 |
+
from .base import CheckoutError, IndexFile
|
| 16 |
+
from .typ import BaseIndexEntry, BlobFilter, IndexEntry, StageType
|
parrot/lib/python3.10/site-packages/git/index/__pycache__/typ.cpython-310.pyc
ADDED
|
Binary file (7.25 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/git/index/__pycache__/util.cpython-310.pyc
ADDED
|
Binary file (3.89 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/git/index/base.py
ADDED
|
@@ -0,0 +1,1518 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
|
| 2 |
+
#
|
| 3 |
+
# This module is part of GitPython and is released under the
|
| 4 |
+
# 3-Clause BSD License: https://opensource.org/license/bsd-3-clause/
|
| 5 |
+
|
| 6 |
+
"""Module containing :class:`IndexFile`, an Index implementation facilitating all kinds
|
| 7 |
+
of index manipulations such as querying and merging."""
|
| 8 |
+
|
| 9 |
+
__all__ = ["IndexFile", "CheckoutError", "StageType"]
|
| 10 |
+
|
| 11 |
+
import contextlib
|
| 12 |
+
import datetime
|
| 13 |
+
import glob
|
| 14 |
+
from io import BytesIO
|
| 15 |
+
import os
|
| 16 |
+
import os.path as osp
|
| 17 |
+
from stat import S_ISLNK
|
| 18 |
+
import subprocess
|
| 19 |
+
import sys
|
| 20 |
+
import tempfile
|
| 21 |
+
|
| 22 |
+
from gitdb.base import IStream
|
| 23 |
+
from gitdb.db import MemoryDB
|
| 24 |
+
|
| 25 |
+
from git.compat import defenc, force_bytes
|
| 26 |
+
import git.diff as git_diff
|
| 27 |
+
from git.exc import CheckoutError, GitCommandError, GitError, InvalidGitRepositoryError
|
| 28 |
+
from git.objects import Blob, Commit, Object, Submodule, Tree
|
| 29 |
+
from git.objects.util import Serializable
|
| 30 |
+
from git.util import (
|
| 31 |
+
LazyMixin,
|
| 32 |
+
LockedFD,
|
| 33 |
+
join_path_native,
|
| 34 |
+
file_contents_ro,
|
| 35 |
+
to_native_path_linux,
|
| 36 |
+
unbare_repo,
|
| 37 |
+
to_bin_sha,
|
| 38 |
+
)
|
| 39 |
+
|
| 40 |
+
from .fun import (
|
| 41 |
+
S_IFGITLINK,
|
| 42 |
+
aggressive_tree_merge,
|
| 43 |
+
entry_key,
|
| 44 |
+
read_cache,
|
| 45 |
+
run_commit_hook,
|
| 46 |
+
stat_mode_to_index_mode,
|
| 47 |
+
write_cache,
|
| 48 |
+
write_tree_from_cache,
|
| 49 |
+
)
|
| 50 |
+
from .typ import BaseIndexEntry, IndexEntry, StageType
|
| 51 |
+
from .util import TemporaryFileSwap, post_clear_cache, default_index, git_working_dir
|
| 52 |
+
|
| 53 |
+
# typing -----------------------------------------------------------------------------
|
| 54 |
+
|
| 55 |
+
from typing import (
|
| 56 |
+
Any,
|
| 57 |
+
BinaryIO,
|
| 58 |
+
Callable,
|
| 59 |
+
Dict,
|
| 60 |
+
Generator,
|
| 61 |
+
IO,
|
| 62 |
+
Iterable,
|
| 63 |
+
Iterator,
|
| 64 |
+
List,
|
| 65 |
+
NoReturn,
|
| 66 |
+
Sequence,
|
| 67 |
+
TYPE_CHECKING,
|
| 68 |
+
Tuple,
|
| 69 |
+
Union,
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
from git.types import Literal, PathLike
|
| 73 |
+
|
| 74 |
+
if TYPE_CHECKING:
|
| 75 |
+
from subprocess import Popen
|
| 76 |
+
|
| 77 |
+
from git.refs.reference import Reference
|
| 78 |
+
from git.repo import Repo
|
| 79 |
+
from git.util import Actor
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
Treeish = Union[Tree, Commit, str, bytes]
|
| 83 |
+
|
| 84 |
+
# ------------------------------------------------------------------------------------
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
@contextlib.contextmanager
|
| 88 |
+
def _named_temporary_file_for_subprocess(directory: PathLike) -> Generator[str, None, None]:
|
| 89 |
+
"""Create a named temporary file git subprocesses can open, deleting it afterward.
|
| 90 |
+
|
| 91 |
+
:param directory:
|
| 92 |
+
The directory in which the file is created.
|
| 93 |
+
|
| 94 |
+
:return:
|
| 95 |
+
A context manager object that creates the file and provides its name on entry,
|
| 96 |
+
and deletes it on exit.
|
| 97 |
+
"""
|
| 98 |
+
if sys.platform == "win32":
|
| 99 |
+
fd, name = tempfile.mkstemp(dir=directory)
|
| 100 |
+
os.close(fd)
|
| 101 |
+
try:
|
| 102 |
+
yield name
|
| 103 |
+
finally:
|
| 104 |
+
os.remove(name)
|
| 105 |
+
else:
|
| 106 |
+
with tempfile.NamedTemporaryFile(dir=directory) as ctx:
|
| 107 |
+
yield ctx.name
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class IndexFile(LazyMixin, git_diff.Diffable, Serializable):
|
| 111 |
+
"""An Index that can be manipulated using a native implementation in order to save
|
| 112 |
+
git command function calls wherever possible.
|
| 113 |
+
|
| 114 |
+
This provides custom merging facilities allowing to merge without actually changing
|
| 115 |
+
your index or your working tree. This way you can perform your own test merges based
|
| 116 |
+
on the index only without having to deal with the working copy. This is useful in
|
| 117 |
+
case of partial working trees.
|
| 118 |
+
|
| 119 |
+
Entries:
|
| 120 |
+
|
| 121 |
+
The index contains an entries dict whose keys are tuples of type
|
| 122 |
+
:class:`~git.index.typ.IndexEntry` to facilitate access.
|
| 123 |
+
|
| 124 |
+
You may read the entries dict or manipulate it using IndexEntry instance, i.e.::
|
| 125 |
+
|
| 126 |
+
index.entries[index.entry_key(index_entry_instance)] = index_entry_instance
|
| 127 |
+
|
| 128 |
+
Make sure you use :meth:`index.write() <write>` once you are done manipulating the
|
| 129 |
+
index directly before operating on it using the git command.
|
| 130 |
+
"""
|
| 131 |
+
|
| 132 |
+
__slots__ = ("repo", "version", "entries", "_extension_data", "_file_path")
|
| 133 |
+
|
| 134 |
+
_VERSION = 2
|
| 135 |
+
"""The latest version we support."""
|
| 136 |
+
|
| 137 |
+
S_IFGITLINK = S_IFGITLINK
|
| 138 |
+
"""Flags for a submodule."""
|
| 139 |
+
|
| 140 |
+
def __init__(self, repo: "Repo", file_path: Union[PathLike, None] = None) -> None:
|
| 141 |
+
"""Initialize this Index instance, optionally from the given `file_path`.
|
| 142 |
+
|
| 143 |
+
If no `file_path` is given, we will be created from the current index file.
|
| 144 |
+
|
| 145 |
+
If a stream is not given, the stream will be initialized from the current
|
| 146 |
+
repository's index on demand.
|
| 147 |
+
"""
|
| 148 |
+
self.repo = repo
|
| 149 |
+
self.version = self._VERSION
|
| 150 |
+
self._extension_data = b""
|
| 151 |
+
self._file_path: PathLike = file_path or self._index_path()
|
| 152 |
+
|
| 153 |
+
def _set_cache_(self, attr: str) -> None:
|
| 154 |
+
if attr == "entries":
|
| 155 |
+
try:
|
| 156 |
+
fd = os.open(self._file_path, os.O_RDONLY)
|
| 157 |
+
except OSError:
|
| 158 |
+
# In new repositories, there may be no index, which means we are empty.
|
| 159 |
+
self.entries: Dict[Tuple[PathLike, StageType], IndexEntry] = {}
|
| 160 |
+
return
|
| 161 |
+
# END exception handling
|
| 162 |
+
|
| 163 |
+
try:
|
| 164 |
+
stream = file_contents_ro(fd, stream=True, allow_mmap=True)
|
| 165 |
+
finally:
|
| 166 |
+
os.close(fd)
|
| 167 |
+
|
| 168 |
+
self._deserialize(stream)
|
| 169 |
+
else:
|
| 170 |
+
super()._set_cache_(attr)
|
| 171 |
+
|
| 172 |
+
def _index_path(self) -> PathLike:
|
| 173 |
+
if self.repo.git_dir:
|
| 174 |
+
return join_path_native(self.repo.git_dir, "index")
|
| 175 |
+
else:
|
| 176 |
+
raise GitCommandError("No git directory given to join index path")
|
| 177 |
+
|
| 178 |
+
@property
|
| 179 |
+
def path(self) -> PathLike:
|
| 180 |
+
""":return: Path to the index file we are representing"""
|
| 181 |
+
return self._file_path
|
| 182 |
+
|
| 183 |
+
def _delete_entries_cache(self) -> None:
|
| 184 |
+
"""Safely clear the entries cache so it can be recreated."""
|
| 185 |
+
try:
|
| 186 |
+
del self.entries
|
| 187 |
+
except AttributeError:
|
| 188 |
+
# It failed in Python 2.6.5 with AttributeError.
|
| 189 |
+
# FIXME: Look into whether we can just remove this except clause now.
|
| 190 |
+
pass
|
| 191 |
+
# END exception handling
|
| 192 |
+
|
| 193 |
+
# { Serializable Interface
|
| 194 |
+
|
| 195 |
+
def _deserialize(self, stream: IO) -> "IndexFile":
|
| 196 |
+
"""Initialize this instance with index values read from the given stream."""
|
| 197 |
+
self.version, self.entries, self._extension_data, _conten_sha = read_cache(stream)
|
| 198 |
+
return self
|
| 199 |
+
|
| 200 |
+
def _entries_sorted(self) -> List[IndexEntry]:
|
| 201 |
+
""":return: List of entries, in a sorted fashion, first by path, then by stage"""
|
| 202 |
+
return sorted(self.entries.values(), key=lambda e: (e.path, e.stage))
|
| 203 |
+
|
| 204 |
+
def _serialize(self, stream: IO, ignore_extension_data: bool = False) -> "IndexFile":
|
| 205 |
+
entries = self._entries_sorted()
|
| 206 |
+
extension_data = self._extension_data # type: Union[None, bytes]
|
| 207 |
+
if ignore_extension_data:
|
| 208 |
+
extension_data = None
|
| 209 |
+
write_cache(entries, stream, extension_data)
|
| 210 |
+
return self
|
| 211 |
+
|
| 212 |
+
# } END serializable interface
|
| 213 |
+
|
| 214 |
+
def write(
|
| 215 |
+
self,
|
| 216 |
+
file_path: Union[None, PathLike] = None,
|
| 217 |
+
ignore_extension_data: bool = False,
|
| 218 |
+
) -> None:
|
| 219 |
+
"""Write the current state to our file path or to the given one.
|
| 220 |
+
|
| 221 |
+
:param file_path:
|
| 222 |
+
If ``None``, we will write to our stored file path from which we have been
|
| 223 |
+
initialized. Otherwise we write to the given file path. Please note that
|
| 224 |
+
this will change the `file_path` of this index to the one you gave.
|
| 225 |
+
|
| 226 |
+
:param ignore_extension_data:
|
| 227 |
+
If ``True``, the TREE type extension data read in the index will not be
|
| 228 |
+
written to disk. NOTE that no extension data is actually written. Use this
|
| 229 |
+
if you have altered the index and would like to use
|
| 230 |
+
:manpage:`git-write-tree(1)` afterwards to create a tree representing your
|
| 231 |
+
written changes. If this data is present in the written index,
|
| 232 |
+
:manpage:`git-write-tree(1)` will instead write the stored/cached tree.
|
| 233 |
+
Alternatively, use :meth:`write_tree` to handle this case automatically.
|
| 234 |
+
"""
|
| 235 |
+
# Make sure we have our entries read before getting a write lock.
|
| 236 |
+
# Otherwise it would be done when streaming.
|
| 237 |
+
# This can happen if one doesn't change the index, but writes it right away.
|
| 238 |
+
self.entries # noqa: B018
|
| 239 |
+
lfd = LockedFD(file_path or self._file_path)
|
| 240 |
+
stream = lfd.open(write=True, stream=True)
|
| 241 |
+
|
| 242 |
+
try:
|
| 243 |
+
self._serialize(stream, ignore_extension_data)
|
| 244 |
+
except BaseException:
|
| 245 |
+
lfd.rollback()
|
| 246 |
+
raise
|
| 247 |
+
|
| 248 |
+
lfd.commit()
|
| 249 |
+
|
| 250 |
+
# Make sure we represent what we have written.
|
| 251 |
+
if file_path is not None:
|
| 252 |
+
self._file_path = file_path
|
| 253 |
+
|
| 254 |
+
@post_clear_cache
|
| 255 |
+
@default_index
|
| 256 |
+
def merge_tree(self, rhs: Treeish, base: Union[None, Treeish] = None) -> "IndexFile":
|
| 257 |
+
"""Merge the given `rhs` treeish into the current index, possibly taking
|
| 258 |
+
a common base treeish into account.
|
| 259 |
+
|
| 260 |
+
As opposed to the :func:`from_tree` method, this allows you to use an already
|
| 261 |
+
existing tree as the left side of the merge.
|
| 262 |
+
|
| 263 |
+
:param rhs:
|
| 264 |
+
Treeish reference pointing to the 'other' side of the merge.
|
| 265 |
+
|
| 266 |
+
:param base:
|
| 267 |
+
Optional treeish reference pointing to the common base of `rhs` and this
|
| 268 |
+
index which equals lhs.
|
| 269 |
+
|
| 270 |
+
:return:
|
| 271 |
+
self (containing the merge and possibly unmerged entries in case of
|
| 272 |
+
conflicts)
|
| 273 |
+
|
| 274 |
+
:raise git.exc.GitCommandError:
|
| 275 |
+
If there is a merge conflict. The error will be raised at the first
|
| 276 |
+
conflicting path. If you want to have proper merge resolution to be done by
|
| 277 |
+
yourself, you have to commit the changed index (or make a valid tree from
|
| 278 |
+
it) and retry with a three-way :meth:`index.from_tree <from_tree>` call.
|
| 279 |
+
"""
|
| 280 |
+
# -i : ignore working tree status
|
| 281 |
+
# --aggressive : handle more merge cases
|
| 282 |
+
# -m : do an actual merge
|
| 283 |
+
args: List[Union[Treeish, str]] = ["--aggressive", "-i", "-m"]
|
| 284 |
+
if base is not None:
|
| 285 |
+
args.append(base)
|
| 286 |
+
args.append(rhs)
|
| 287 |
+
|
| 288 |
+
self.repo.git.read_tree(args)
|
| 289 |
+
return self
|
| 290 |
+
|
| 291 |
+
@classmethod
|
| 292 |
+
def new(cls, repo: "Repo", *tree_sha: Union[str, Tree]) -> "IndexFile":
|
| 293 |
+
"""Merge the given treeish revisions into a new index which is returned.
|
| 294 |
+
|
| 295 |
+
This method behaves like ``git-read-tree --aggressive`` when doing the merge.
|
| 296 |
+
|
| 297 |
+
:param repo:
|
| 298 |
+
The repository treeish are located in.
|
| 299 |
+
|
| 300 |
+
:param tree_sha:
|
| 301 |
+
20 byte or 40 byte tree sha or tree objects.
|
| 302 |
+
|
| 303 |
+
:return:
|
| 304 |
+
New :class:`IndexFile` instance. Its path will be undefined.
|
| 305 |
+
If you intend to write such a merged Index, supply an alternate
|
| 306 |
+
``file_path`` to its :meth:`write` method.
|
| 307 |
+
"""
|
| 308 |
+
tree_sha_bytes: List[bytes] = [to_bin_sha(str(t)) for t in tree_sha]
|
| 309 |
+
base_entries = aggressive_tree_merge(repo.odb, tree_sha_bytes)
|
| 310 |
+
|
| 311 |
+
inst = cls(repo)
|
| 312 |
+
# Convert to entries dict.
|
| 313 |
+
entries: Dict[Tuple[PathLike, int], IndexEntry] = dict(
|
| 314 |
+
zip(
|
| 315 |
+
((e.path, e.stage) for e in base_entries),
|
| 316 |
+
(IndexEntry.from_base(e) for e in base_entries),
|
| 317 |
+
)
|
| 318 |
+
)
|
| 319 |
+
|
| 320 |
+
inst.entries = entries
|
| 321 |
+
return inst
|
| 322 |
+
|
| 323 |
+
@classmethod
|
| 324 |
+
def from_tree(cls, repo: "Repo", *treeish: Treeish, **kwargs: Any) -> "IndexFile":
|
| 325 |
+
R"""Merge the given treeish revisions into a new index which is returned.
|
| 326 |
+
The original index will remain unaltered.
|
| 327 |
+
|
| 328 |
+
:param repo:
|
| 329 |
+
The repository treeish are located in.
|
| 330 |
+
|
| 331 |
+
:param treeish:
|
| 332 |
+
One, two or three :class:`~git.objects.tree.Tree` objects,
|
| 333 |
+
:class:`~git.objects.commit.Commit`\s or 40 byte hexshas.
|
| 334 |
+
|
| 335 |
+
The result changes according to the amount of trees:
|
| 336 |
+
|
| 337 |
+
1. If 1 Tree is given, it will just be read into a new index.
|
| 338 |
+
2. If 2 Trees are given, they will be merged into a new index using a two
|
| 339 |
+
way merge algorithm. Tree 1 is the 'current' tree, tree 2 is the 'other'
|
| 340 |
+
one. It behaves like a fast-forward.
|
| 341 |
+
3. If 3 Trees are given, a 3-way merge will be performed with the first tree
|
| 342 |
+
being the common ancestor of tree 2 and tree 3. Tree 2 is the 'current'
|
| 343 |
+
tree, tree 3 is the 'other' one.
|
| 344 |
+
|
| 345 |
+
:param kwargs:
|
| 346 |
+
Additional arguments passed to :manpage:`git-read-tree(1)`.
|
| 347 |
+
|
| 348 |
+
:return:
|
| 349 |
+
New :class:`IndexFile` instance. It will point to a temporary index location
|
| 350 |
+
which does not exist anymore. If you intend to write such a merged Index,
|
| 351 |
+
supply an alternate ``file_path`` to its :meth:`write` method.
|
| 352 |
+
|
| 353 |
+
:note:
|
| 354 |
+
In the three-way merge case, ``--aggressive`` will be specified to
|
| 355 |
+
automatically resolve more cases in a commonly correct manner. Specify
|
| 356 |
+
``trivial=True`` as a keyword argument to override that.
|
| 357 |
+
|
| 358 |
+
As the underlying :manpage:`git-read-tree(1)` command takes into account the
|
| 359 |
+
current index, it will be temporarily moved out of the way to prevent any
|
| 360 |
+
unexpected interference.
|
| 361 |
+
"""
|
| 362 |
+
if len(treeish) == 0 or len(treeish) > 3:
|
| 363 |
+
raise ValueError("Please specify between 1 and 3 treeish, got %i" % len(treeish))
|
| 364 |
+
|
| 365 |
+
arg_list: List[Union[Treeish, str]] = []
|
| 366 |
+
# Ignore that the working tree and index possibly are out of date.
|
| 367 |
+
if len(treeish) > 1:
|
| 368 |
+
# Drop unmerged entries when reading our index and merging.
|
| 369 |
+
arg_list.append("--reset")
|
| 370 |
+
# Handle non-trivial cases the way a real merge does.
|
| 371 |
+
arg_list.append("--aggressive")
|
| 372 |
+
# END merge handling
|
| 373 |
+
|
| 374 |
+
# Create the temporary file in the .git directory to be sure renaming
|
| 375 |
+
# works - /tmp/ directories could be on another device.
|
| 376 |
+
with _named_temporary_file_for_subprocess(repo.git_dir) as tmp_index:
|
| 377 |
+
arg_list.append("--index-output=%s" % tmp_index)
|
| 378 |
+
arg_list.extend(treeish)
|
| 379 |
+
|
| 380 |
+
# Move the current index out of the way - otherwise the merge may fail as it
|
| 381 |
+
# considers existing entries. Moving it essentially clears the index.
|
| 382 |
+
# Unfortunately there is no 'soft' way to do it.
|
| 383 |
+
# The TemporaryFileSwap ensures the original file gets put back.
|
| 384 |
+
with TemporaryFileSwap(join_path_native(repo.git_dir, "index")):
|
| 385 |
+
repo.git.read_tree(*arg_list, **kwargs)
|
| 386 |
+
index = cls(repo, tmp_index)
|
| 387 |
+
index.entries # noqa: B018 # Force it to read the file as we will delete the temp-file.
|
| 388 |
+
return index
|
| 389 |
+
# END index merge handling
|
| 390 |
+
|
| 391 |
+
# UTILITIES
|
| 392 |
+
|
| 393 |
+
@unbare_repo
def _iter_expand_paths(self: "IndexFile", paths: Sequence[PathLike]) -> Iterator[PathLike]:
    """Yield the repository-relative file paths described by `paths`, expanding
    directories and glob patterns along the way.

    :note:
        Mirrors git's behaviour: items are yielded once per mention, so a glob
        overlapping a manually specified path, or a path listed multiple times,
        produces duplicates - no pruning is performed.
    """

    def reraise(err: Exception) -> NoReturn:
        raise err

    tree_dir = str(self.repo.working_tree_dir)
    tree_prefix = tree_dir + os.sep

    for path in paths:
        abs_path = str(path)
        if not osp.isabs(abs_path):
            abs_path = osp.join(tree_dir, path)
        # END make absolute path

        try:
            st = os.lstat(abs_path)  # Works for non-symlinks as well.
        except OSError:
            # The lstat call may fail as the path may contain globs as well.
            st = None
        if st is not None and S_ISLNK(st.st_mode):
            # Symlinks are yielded as-is, never followed.
            yield abs_path.replace(tree_prefix, "")
            continue
        # END check symlink

        # If the path is not already pointing to an existing file, resolve globs if possible.
        if not os.path.exists(abs_path) and any(ch in abs_path for ch in "?*["):
            resolved_paths = glob.glob(abs_path)
            # A glob() resolving to the same path we are feeding it with is a
            # glob() that failed to resolve; recursing on it would never
            # terminate. When abs_path is absent from its own glob result we
            # are dealing with a genuine pattern worth recursing into.
            if abs_path not in resolved_paths:
                for expanded in self._iter_expand_paths(resolved_paths):
                    yield str(expanded).replace(tree_prefix, "")
                continue
        # END glob handling
        try:
            for root, _dirs, files in os.walk(abs_path, onerror=reraise):
                for file_name in files:
                    # Yield repository-relative paths only.
                    yield osp.join(root.replace(tree_prefix, ""), file_name)
                # END for each file in subdir
            # END for each subdirectory
        except OSError:
            # A plain file, or something that could not be iterated.
            yield abs_path.replace(tree_prefix, "")
        # END path exception handling
    # END for each path
|
| 452 |
+
|
| 453 |
+
def _write_path_to_stdin(
    self,
    proc: "Popen",
    filepath: PathLike,
    item: PathLike,
    fmakeexc: Callable[..., GitError],
    fprogress: Callable[[PathLike, bool, PathLike], None],
    read_from_stdout: bool = True,
) -> Union[None, str]:
    """Feed `filepath` to ``proc.stdin`` and ensure the item gets processed,
    emitting progress callbacks before and after.

    :return:
        The stripped stdout line when `read_from_stdout` is ``True`` and stdout
        is available, ``None`` otherwise.

    :param read_from_stdout:
        If ``True``, a line is read from ``proc.stdout`` after the item was
        sent to stdin.

    :note:
        :manpage:`git-update-index(1)` has a bug preventing timely reports,
        which is why callers may opt out of reading stdout - the piped-in files
        are processed just in time regardless.

    :note:
        Newline handling differs between git versions, so we send one newline
        per path and rely on closing stdin to terminate the final entry.
    """
    fprogress(filepath, False, item)
    result: Union[None, str] = None

    stdin = proc.stdin
    if stdin is not None:
        try:
            stdin.write(("%s\n" % filepath).encode(defenc))
        except IOError as e:
            # A broken pipe usually means the child process already failed.
            raise fmakeexc() from e
        # END write exception handling
        stdin.flush()

    if read_from_stdout and proc.stdout is not None:
        result = proc.stdout.readline().strip()
    fprogress(filepath, True, item)
    return result
|
| 500 |
+
|
| 501 |
+
def iter_blobs(
    self, predicate: Callable[[Tuple[StageType, Blob]], bool] = lambda t: True
) -> Iterator[Tuple[StageType, Blob]]:
    """
    :return:
        Iterator yielding ``(stage, Blob)`` tuples built from our entries.

    :param predicate:
        Function(t) returning ``True`` if a ``(stage, Blob)`` tuple should be
        yielded. A ready-made filter, `~git.index.typ.BlobFilter`, restricts
        the output to blobs matching a given list of paths.
    """
    for entry in self.entries.values():
        blob = entry.to_blob(self.repo)
        blob.size = entry.size
        pair = (entry.stage, blob)
        if not predicate(pair):
            continue
        yield pair
    # END for each entry
|
| 521 |
+
|
| 522 |
+
def unmerged_blobs(self) -> Dict[PathLike, List[Tuple[StageType, Blob]]]:
    """
    :return:
        Dict mapping each conflicted path in the index to its sorted list of
        ``(stage, Blob)`` pairs.

    :note:
        A blob removed on one side simply has no entry at the corresponding
        stage - e.g. a file deleted on the 'other' branch will lack a stage 3
        entry for that path.
    """
    path_map: Dict[PathLike, List[Tuple[StageType, Blob]]] = {}
    # Stage 0 means "merged"; everything else is part of a conflict.
    for stage, blob in self.iter_blobs(lambda t: t[0] != 0):
        path_map.setdefault(blob.path, []).append((stage, blob))
    # END for each unmerged blob
    for stage_blob_pairs in path_map.values():
        stage_blob_pairs.sort()

    return path_map
|
| 542 |
+
|
| 543 |
+
@classmethod
def entry_key(cls, *entry: Union[BaseIndexEntry, PathLike, StageType]) -> Tuple[PathLike, StageType]:
    """Delegate to the module-level :func:`entry_key` helper, producing the
    ``(path, stage)`` key used to look up entries in :attr:`entries`."""
    return entry_key(*entry)
|
| 546 |
+
|
| 547 |
+
def resolve_blobs(self, iter_blobs: Iterator[Blob]) -> "IndexFile":
    """Resolve the blobs given in the blob iterator.

    For each blob, all index entries for its path at non-null stages are
    removed and the blob itself is recorded as the new stage-null entry.

    Only one blob per path is permitted; a second one for the same path will
    find the path already at stage 0.

    :raise ValueError:
        If one of the blobs already existed at stage 0.

    :return:
        self

    :note:
        The index is not persisted automatically - call
        ``index.resolve_blobs(blobs).write()`` when done.
    """
    for blob in iter_blobs:
        stage_null_key = (blob.path, 0)
        if stage_null_key in self.entries:
            raise ValueError("Path %r already exists at stage 0" % str(blob.path))
        # END assert blob is not stage 0 already

        # Drop the conflict entries at every possible stage, if present.
        for stage in (1, 2, 3):
            self.entries.pop((blob.path, stage), None)
        # END for each possible stage

        self.entries[stage_null_key] = IndexEntry.from_blob(blob)
    # END for each blob

    return self
|
| 585 |
+
|
| 586 |
+
def update(self) -> "IndexFile":
    """Drop all cached information so the index file is reread on next access.

    :note:
        Potentially dangerous: any unwritten changes to
        :attr:`index.entries <entries>` are discarded.

    :return:
        self
    """
    self._delete_entries_cache()
    # The entries will be reread lazily, on demand.
    return self
|
| 600 |
+
|
| 601 |
+
def write_tree(self) -> Tree:
    """Serialize this index as a :class:`~git.objects.tree.Tree` into the
    repository's object database and return it.

    :return:
        :class:`~git.objects.tree.Tree` object representing this index.

    :note:
        The tree is written even when objects it references are missing from
        the object database, which can happen when entries were added to the
        index directly.

    :raise ValueError:
        If there are no entries in the cache.

    :raise git.exc.UnmergedEntriesError:
    """
    # No lock is taken - we merely flush our contents to disk as a tree.
    # On a fresh index, the entries access loads our data first.
    mdb = MemoryDB()
    sorted_entries = self._entries_sorted()
    binsha, tree_items = write_tree_from_cache(sorted_entries, mdb, slice(0, len(sorted_entries)))

    # Transfer only the changed trees into the repository's database.
    mdb.stream_copy(mdb.sha_iter(), self.repo.odb)

    # NOTE: Having write_tree_from_cache return sorted tree entries would
    # spare an extra deserialization here.
    root_tree = Tree(self.repo, binsha, path="")
    root_tree._cache = tree_items
    return root_tree
|
| 632 |
+
|
| 633 |
+
def _process_diff_args(
|
| 634 |
+
self,
|
| 635 |
+
args: List[Union[PathLike, "git_diff.Diffable"]],
|
| 636 |
+
) -> List[Union[PathLike, "git_diff.Diffable"]]:
|
| 637 |
+
try:
|
| 638 |
+
args.pop(args.index(self))
|
| 639 |
+
except IndexError:
|
| 640 |
+
pass
|
| 641 |
+
# END remove self
|
| 642 |
+
return args
|
| 643 |
+
|
| 644 |
+
def _to_relative_path(self, path: PathLike) -> PathLike:
|
| 645 |
+
"""
|
| 646 |
+
:return:
|
| 647 |
+
Version of path relative to our git directory or raise :exc:`ValueError` if
|
| 648 |
+
it is not within our git directory.
|
| 649 |
+
|
| 650 |
+
:raise ValueError:
|
| 651 |
+
"""
|
| 652 |
+
if not osp.isabs(path):
|
| 653 |
+
return path
|
| 654 |
+
if self.repo.bare:
|
| 655 |
+
raise InvalidGitRepositoryError("require non-bare repository")
|
| 656 |
+
if not str(path).startswith(str(self.repo.working_tree_dir)):
|
| 657 |
+
raise ValueError("Absolute path %r is not in git repository at %r" % (path, self.repo.working_tree_dir))
|
| 658 |
+
return os.path.relpath(path, self.repo.working_tree_dir)
|
| 659 |
+
|
| 660 |
+
def _preprocess_add_items(
    self, items: Sequence[Union[PathLike, Blob, BaseIndexEntry, "Submodule"]]
) -> Tuple[List[PathLike], List[BaseIndexEntry]]:
    """Partition `items` into a list of repo-relative path strings and a list
    of BaseIndexEntry objects (blobs and submodules are converted)."""
    # A single path is promoted to a one-element sequence.
    if isinstance(items, (str, os.PathLike)):
        items = [items]

    paths: List[PathLike] = []
    entries: List[BaseIndexEntry] = []
    for item in items:
        if isinstance(item, (str, os.PathLike)):
            paths.append(self._to_relative_path(item))
        elif isinstance(item, (Blob, Submodule)):
            entries.append(BaseIndexEntry.from_blob(item))
        elif isinstance(item, BaseIndexEntry):
            entries.append(item)
        else:
            raise TypeError("Invalid Type: %r" % item)
    # END for each item
    return paths, entries
|
| 681 |
+
|
| 682 |
+
def _store_path(self, filepath: PathLike, fprogress: Callable) -> BaseIndexEntry:
    """Store the file at `filepath` in the object database and return a
    matching base index entry.

    :note:
        Requires the :func:`~git.index.util.git_working_dir` decorator to be
        active! The caller must ensure this.
    """
    st = os.lstat(filepath)  # Works for non-symlinks as well.
    if S_ISLNK(st.st_mode):
        # The link target becomes the blob content. readlink() returns str,
        # which must be encoded (assumed UTF-8) before storage.
        def open_stream() -> BinaryIO:
            return BytesIO(force_bytes(os.readlink(filepath), encoding=defenc))

    else:

        def open_stream() -> BinaryIO:
            return open(filepath, "rb")

    with open_stream() as stream:
        fprogress(filepath, False, filepath)
        istream = self.repo.odb.store(IStream(Blob.type, st.st_size, stream))
        fprogress(filepath, True, filepath)
    return BaseIndexEntry(
        (
            stat_mode_to_index_mode(st.st_mode),
            istream.binsha,
            0,
            to_native_path_linux(filepath),
        )
    )
|
| 708 |
+
|
| 709 |
+
@unbare_repo
@git_working_dir
def _entries_for_paths(
    self,
    paths: List[str],
    path_rewriter: Union[Callable, None],
    fprogress: Callable,
    entries: List[BaseIndexEntry],
) -> List[BaseIndexEntry]:
    """Store the files at `paths` and return the new entries created for them.

    :param paths:
        Paths to store; consumed (cleared) when `path_rewriter` is set.

    :param path_rewriter:
        When set, each path is converted to a null-sha :class:`BaseIndexEntry`
        appended to `entries` instead of being stored directly.

    :param fprogress:
        Progress callback forwarded to :meth:`_store_path`.

    :param entries:
        Entry list that receives converted paths when `path_rewriter` is set.

    :return:
        List of entries actually stored from `paths`.
    """
    entries_added: List[BaseIndexEntry] = []
    if path_rewriter:
        for path in paths:
            if osp.isabs(path):
                abspath = path
                gitrelative_path = path[len(str(self.repo.working_tree_dir)) + 1 :]
            else:
                gitrelative_path = path
                # BUGFIX: `abspath` could previously stay unbound here (the old
                # "TODO: variable undefined"). @git_working_dir has made the
                # working tree the cwd, so the relative path is a valid
                # fallback should working_tree_dir ever be unset.
                if self.repo.working_tree_dir:
                    abspath = osp.join(self.repo.working_tree_dir, gitrelative_path)
                else:
                    abspath = gitrelative_path
            # END obtain relative and absolute paths

            blob = Blob(
                self.repo,
                Blob.NULL_BIN_SHA,
                stat_mode_to_index_mode(os.stat(abspath).st_mode),
                to_native_path_linux(gitrelative_path),
            )
            entries.append(BaseIndexEntry.from_blob(blob))
        # END for each path
        del paths[:]
    # END rewrite paths

    # HANDLE PATHS
    assert len(entries_added) == 0
    for filepath in self._iter_expand_paths(paths):
        entries_added.append(self._store_path(filepath, fprogress))
    # END for each filepath
    # END path handling
    return entries_added
|
| 749 |
+
|
| 750 |
+
def add(
    self,
    items: Sequence[Union[PathLike, Blob, BaseIndexEntry, "Submodule"]],
    force: bool = True,
    fprogress: Callable = lambda *args: None,
    path_rewriter: Union[Callable[..., PathLike], None] = None,
    write: bool = True,
    write_extension_data: bool = False,
) -> List[BaseIndexEntry]:
    R"""Add files from the working tree, specific blobs, or
    :class:`~git.index.typ.BaseIndexEntry`\s to the index.

    :param items:
        A mixed sequence of item types, each handled differently:

        - path string: a relative or absolute path into the repository
          pointing to an existing file. Absolute paths must start with this
          index's working tree directory (un-normalized, exactly as the
          repository was initialized with). Globs (``lib/__init__*``) and
          directories (``lib``, added recursively) are supported; this equals
          a straight :manpage:`git-add(1)`. Added at stage 0, and written into
          the object database.

        - :class:~`git.objects.blob.Blob` or
          :class:`~git.objects.submodule.base.Submodule` object: added as-is,
          assuming a valid mode is set. The referenced file need not exist
          unless the sha is null (40*0), in which case an object is created
          from the data at the (repository-relative) path. The mode you set is
          kept, which allows creating symlinks by setting the mode accordingly
          and writing the link target into the file - even on filesystems
          without symlink support. Globs/directories are not allowed here.
          Added at stage 0.

        - :class:`~git.index.typ.BaseIndexEntry` or type: like Blobs, but the
          stage may be set explicitly. Index Entries require binary sha's.

    :param force:
        **CURRENTLY INEFFECTIVE** - intended to add otherwise ignored or
        excluded files anyway; enabled by default, unlike
        :manpage:`git-add(1)`.

    :param fprogress:
        Function with signature ``f(path, done=False, item=item)``, called
        once before (``done=False``) and once after (``done=True``) each item
        is added. ``item`` is the path or
        :class:`~git.index.typ.BaseIndexEntry` being handled. The processed
        path may not yet be present in the index while processing runs.

    :param path_rewriter:
        Function ``(string) func(BaseIndexEntry)`` returning the path to
        actually record for the object created from ``entry.path`` (relative
        to the git repository). Lets the written index layout differ from the
        on-disk layout. Plain paths in `items` are converted to Entries first.

    :param write:
        If ``True``, the index is written once altered; otherwise the changes
        live in memory only and are invisible to git commands.

    :param write_extension_data:
        If ``True``, extension data is written back to the index, which can
        make :manpage:`git-commit(1)` write an old tree if the 'TREE'
        extension is present. Irrelevant when using :meth:`IndexFile.commit`,
        which ignores 'TREE' altogether. Set it to ``True`` when committing
        exclusively through :meth:`IndexFile.commit` while supporting
        third-party extensions. Built-in extensions are listed at
        https://git-scm.com/docs/index-format

    :return:
        List of :class:`~git.index.typ.BaseIndexEntry`\s for the entries just
        actually added.

    :raise OSError:
        If a supplied path did not exist. :class:`~git.index.typ.BaseIndexEntry`
        objects with a non-null sha are added even when their paths don't exist.
    """
    # Split the input: plain paths versus ready-made entries (Blobs are
    # converted to entries automatically). Paths can be git-added; for
    # everything else we use git-update-index.
    paths, entries = self._preprocess_add_items(items)
    entries_added: List[BaseIndexEntry] = []

    # The path branch needs a working tree, so only enter it when required -
    # that keeps us functional on bare repositories. With no paths, the
    # rewriter has nothing to do either.
    if paths:
        entries_added.extend(self._entries_for_paths(paths, path_rewriter, fprogress, entries))

    # HANDLE ENTRIES
    if entries:
        if any(e.mode == 0 for e in entries):
            raise ValueError(
                "At least one Entry has a null-mode - please use index.remove to remove files for clarity"
            )
        # END null mode should be remove

        # HANDLE ENTRY OBJECT CREATION
        # Entries with a null sha need their object created; the rest keep
        # their existing shas.
        null_entries_indices = [i for i, e in enumerate(entries) if e.binsha == Object.NULL_BIN_SHA]
        if null_entries_indices:

            @git_working_dir
            def handle_null_entries(self: "IndexFile") -> None:
                for ei in null_entries_indices:
                    unresolved = entries[ei]
                    stored = self._store_path(unresolved.path, fprogress)

                    # Replace the null entry, keeping mode, stage and path.
                    entries[ei] = BaseIndexEntry(
                        (
                            unresolved.mode,
                            stored.binsha,
                            unresolved.stage,
                            unresolved.path,
                        )
                    )
                # END for each entry index

            # END closure

            handle_null_entries(self)
        # END null_entry handling

        # REWRITE PATHS
        # All object sha's exist now, so the recorded paths may be rewritten.
        if path_rewriter:
            entries = [BaseIndexEntry((e.mode, e.binsha, e.stage, path_rewriter(e))) for e in entries]
        # END handle path rewriting

        # Emit progress for every entry that didn't already get it above.
        for i, entry in enumerate(entries):
            if i not in null_entries_indices:
                fprogress(entry.path, False, entry)
                fprogress(entry.path, True, entry)
            # END handle progress
        # END for each entry
        entries_added.extend(entries)
    # END if there are base entries

    # FINALIZE
    # Record the new entries on this instance.
    for entry in entries_added:
        self.entries[(entry.path, 0)] = IndexEntry.from_base(entry)

    if write:
        self.write(ignore_extension_data=not write_extension_data)
    # END handle write

    return entries_added
|
| 953 |
+
|
| 954 |
+
def _items_to_rela_paths(
    self,
    items: Union[PathLike, Sequence[Union[PathLike, BaseIndexEntry, Blob, Submodule]]],
) -> List[PathLike]:
    """Convert the given items - absolute or relative paths, entries or blobs -
    into a list of repository-relative paths."""
    # A single path is promoted to a one-element sequence.
    if isinstance(items, (str, os.PathLike)):
        items = [items]

    paths: List[PathLike] = []
    for item in items:
        if isinstance(item, (BaseIndexEntry, Blob, Submodule)):
            paths.append(self._to_relative_path(item.path))
        elif isinstance(item, (str, os.PathLike)):
            paths.append(self._to_relative_path(item))
        else:
            raise TypeError("Invalid item type: %r" % item)
    # END for each item
    return paths
|
| 974 |
+
|
| 975 |
+
@post_clear_cache
@default_index
def remove(
    self,
    items: Sequence[Union[PathLike, Blob, BaseIndexEntry, "Submodule"]],
    working_tree: bool = False,
    **kwargs: Any,
) -> List[str]:
    R"""Remove the given items from the index, and optionally from the working
    tree as well.

    :param items:
        A freely mixed sequence of supported item types:

        - path string: removed at all stages. Directories require the
          ``r=True`` keyword argument to remove the file entries below them.
          Absolute paths are converted to paths relative to the git repository
          directory containing the working tree. Globs such as ``*.c`` are
          allowed.

        - :class:~`git.objects.blob.Blob` object: only its path portion is
          used.

        - :class:`~git.index.typ.BaseIndexEntry` or compatible type: only the
          path matters; the stage is ignored.

    :param working_tree:
        If ``True``, the respective file is physically removed from the
        working tree as well, which may fail on uncommitted changes.

    :param kwargs:
        Additional keyword arguments forwarded to :manpage:`git-rm(1)`, e.g.
        ``r`` for recursive removal.

    :return:
        List of repository-relative path strings effectively removed - useful
        when directories or globs were provided.
    """
    args = [] if working_tree else ["--cached"]
    args.append("--")

    # Convert everything to repo-relative paths and hand them to git-rm.
    paths = self._items_to_rela_paths(items)
    removed_paths = self.repo.git.rm(args, paths, **kwargs).splitlines()

    # Each output line has the shape: rm 'path' - strip the wrapping.
    return [p[4:-1] for p in removed_paths]
|
| 1034 |
+
|
| 1035 |
+
@post_clear_cache
@default_index
def move(
    self,
    items: Sequence[Union[PathLike, Blob, BaseIndexEntry, "Submodule"]],
    skip_errors: bool = False,
    **kwargs: Any,
) -> List[Tuple[str, str]]:
    """Rename/move the items, the last of which is the destination of the move
    operation.

    A file destination requires exactly one (file) source; a directory
    destination accepts any number of preceding files or directories.

    Non-bare repositories have their working tree affected as well.

    :param items:
        Supported item types are the same as for the :meth:`remove` method.

    :param skip_errors:
        If ``True``, errors such as missing source files are skipped.

    :param kwargs:
        Additional arguments forwarded to :manpage:`git-mv(1)`, e.g.
        ``dry_run`` or ``force``.

    :return:
        List of ``(source_path_string, destination_path_string)`` pairs -
        each moved file together with its actual destination, relative to the
        repository root.

    :raise ValueError:
        If only one item was given.

    :raise git.exc.GitCommandError:
        If git could not handle your request.
    """
    args = ["-k"] if skip_errors else []

    paths = self._items_to_rela_paths(items)
    if len(paths) < 2:
        raise ValueError("Please provide at least one source and one destination of the move operation")

    was_dry_run = kwargs.pop("dry_run", kwargs.pop("n", None))
    kwargs["dry_run"] = True

    # Run the rename as a dry run first so the command reports what it would
    # actually do - we parse that report for our return value.
    mvlines = self.repo.git.mv(args, paths, **kwargs).splitlines()

    # The first half of the lines are 'Checking ...'; the second half are the
    # 'Renaming x to y' lines we parse.
    out = []
    for ln in range(int(len(mvlines) / 2), len(mvlines)):
        tokens = mvlines[ln].split(" to ")
        assert len(tokens) == 2, "Too many tokens in %s" % mvlines[ln]

        # tokens[0] == 'Renaming <source>', tokens[1] == '<destination>'.
        out.append((tokens[0][9:], tokens[1]))
    # END for each line to parse

    # A requested dry run ends here with the parsed report.
    if was_dry_run:
        return out
    # END handle dry run

    # Now perform the move for real.
    kwargs.pop("dry_run")
    self.repo.git.mv(args, paths, **kwargs)

    return out
|
| 1114 |
+
|
| 1115 |
+
def commit(
|
| 1116 |
+
self,
|
| 1117 |
+
message: str,
|
| 1118 |
+
parent_commits: Union[List[Commit], None] = None,
|
| 1119 |
+
head: bool = True,
|
| 1120 |
+
author: Union[None, "Actor"] = None,
|
| 1121 |
+
committer: Union[None, "Actor"] = None,
|
| 1122 |
+
author_date: Union[datetime.datetime, str, None] = None,
|
| 1123 |
+
commit_date: Union[datetime.datetime, str, None] = None,
|
| 1124 |
+
skip_hooks: bool = False,
|
| 1125 |
+
) -> Commit:
|
| 1126 |
+
"""Commit the current default index file, creating a
|
| 1127 |
+
:class:`~git.objects.commit.Commit` object.
|
| 1128 |
+
|
| 1129 |
+
For more information on the arguments, see
|
| 1130 |
+
:meth:`Commit.create_from_tree <git.objects.commit.Commit.create_from_tree>`.
|
| 1131 |
+
|
| 1132 |
+
:note:
|
| 1133 |
+
If you have manually altered the :attr:`entries` member of this instance,
|
| 1134 |
+
don't forget to :meth:`write` your changes to disk beforehand.
|
| 1135 |
+
|
| 1136 |
+
:note:
|
| 1137 |
+
Passing ``skip_hooks=True`` is the equivalent of using ``-n`` or
|
| 1138 |
+
``--no-verify`` on the command line.
|
| 1139 |
+
|
| 1140 |
+
:return:
|
| 1141 |
+
:class:`~git.objects.commit.Commit` object representing the new commit
|
| 1142 |
+
"""
|
| 1143 |
+
if not skip_hooks:
|
| 1144 |
+
run_commit_hook("pre-commit", self)
|
| 1145 |
+
|
| 1146 |
+
self._write_commit_editmsg(message)
|
| 1147 |
+
run_commit_hook("commit-msg", self, self._commit_editmsg_filepath())
|
| 1148 |
+
message = self._read_commit_editmsg()
|
| 1149 |
+
self._remove_commit_editmsg()
|
| 1150 |
+
tree = self.write_tree()
|
| 1151 |
+
rval = Commit.create_from_tree(
|
| 1152 |
+
self.repo,
|
| 1153 |
+
tree,
|
| 1154 |
+
message,
|
| 1155 |
+
parent_commits,
|
| 1156 |
+
head,
|
| 1157 |
+
author=author,
|
| 1158 |
+
committer=committer,
|
| 1159 |
+
author_date=author_date,
|
| 1160 |
+
commit_date=commit_date,
|
| 1161 |
+
)
|
| 1162 |
+
if not skip_hooks:
|
| 1163 |
+
run_commit_hook("post-commit", self)
|
| 1164 |
+
return rval
|
| 1165 |
+
|
| 1166 |
+
def _write_commit_editmsg(self, message: str) -> None:
|
| 1167 |
+
with open(self._commit_editmsg_filepath(), "wb") as commit_editmsg_file:
|
| 1168 |
+
commit_editmsg_file.write(message.encode(defenc))
|
| 1169 |
+
|
| 1170 |
+
def _remove_commit_editmsg(self) -> None:
|
| 1171 |
+
os.remove(self._commit_editmsg_filepath())
|
| 1172 |
+
|
| 1173 |
+
def _read_commit_editmsg(self) -> str:
|
| 1174 |
+
with open(self._commit_editmsg_filepath(), "rb") as commit_editmsg_file:
|
| 1175 |
+
return commit_editmsg_file.read().decode(defenc)
|
| 1176 |
+
|
| 1177 |
+
def _commit_editmsg_filepath(self) -> str:
|
| 1178 |
+
return osp.join(self.repo.common_dir, "COMMIT_EDITMSG")
|
| 1179 |
+
|
| 1180 |
+
def _flush_stdin_and_wait(cls, proc: "Popen[bytes]", ignore_stdout: bool = False) -> bytes:
|
| 1181 |
+
stdin_IO = proc.stdin
|
| 1182 |
+
if stdin_IO:
|
| 1183 |
+
stdin_IO.flush()
|
| 1184 |
+
stdin_IO.close()
|
| 1185 |
+
|
| 1186 |
+
stdout = b""
|
| 1187 |
+
if not ignore_stdout and proc.stdout:
|
| 1188 |
+
stdout = proc.stdout.read()
|
| 1189 |
+
|
| 1190 |
+
if proc.stdout:
|
| 1191 |
+
proc.stdout.close()
|
| 1192 |
+
proc.wait()
|
| 1193 |
+
return stdout
|
| 1194 |
+
|
| 1195 |
+
@default_index
|
| 1196 |
+
def checkout(
|
| 1197 |
+
self,
|
| 1198 |
+
paths: Union[None, Iterable[PathLike]] = None,
|
| 1199 |
+
force: bool = False,
|
| 1200 |
+
fprogress: Callable = lambda *args: None,
|
| 1201 |
+
**kwargs: Any,
|
| 1202 |
+
) -> Union[None, Iterator[PathLike], Sequence[PathLike]]:
|
| 1203 |
+
"""Check out the given paths or all files from the version known to the index
|
| 1204 |
+
into the working tree.
|
| 1205 |
+
|
| 1206 |
+
:note:
|
| 1207 |
+
Be sure you have written pending changes using the :meth:`write` method in
|
| 1208 |
+
case you have altered the entries dictionary directly.
|
| 1209 |
+
|
| 1210 |
+
:param paths:
|
| 1211 |
+
If ``None``, all paths in the index will be checked out.
|
| 1212 |
+
Otherwise an iterable of relative or absolute paths or a single path
|
| 1213 |
+
pointing to files or directories in the index is expected.
|
| 1214 |
+
|
| 1215 |
+
:param force:
|
| 1216 |
+
If ``True``, existing files will be overwritten even if they contain local
|
| 1217 |
+
modifications.
|
| 1218 |
+
If ``False``, these will trigger a :exc:`~git.exc.CheckoutError`.
|
| 1219 |
+
|
| 1220 |
+
:param fprogress:
|
| 1221 |
+
See :meth:`IndexFile.add` for signature and explanation.
|
| 1222 |
+
|
| 1223 |
+
The provided progress information will contain ``None`` as path and item if
|
| 1224 |
+
no explicit paths are given. Otherwise progress information will be send
|
| 1225 |
+
prior and after a file has been checked out.
|
| 1226 |
+
|
| 1227 |
+
:param kwargs:
|
| 1228 |
+
Additional arguments to be passed to :manpage:`git-checkout-index(1)`.
|
| 1229 |
+
|
| 1230 |
+
:return:
|
| 1231 |
+
Iterable yielding paths to files which have been checked out and are
|
| 1232 |
+
guaranteed to match the version stored in the index.
|
| 1233 |
+
|
| 1234 |
+
:raise git.exc.CheckoutError:
|
| 1235 |
+
* If at least one file failed to be checked out. This is a summary, hence it
|
| 1236 |
+
will checkout as many files as it can anyway.
|
| 1237 |
+
* If one of files or directories do not exist in the index (as opposed to
|
| 1238 |
+
the original git command, which ignores them).
|
| 1239 |
+
|
| 1240 |
+
:raise git.exc.GitCommandError:
|
| 1241 |
+
If error lines could not be parsed - this truly is an exceptional state.
|
| 1242 |
+
|
| 1243 |
+
:note:
|
| 1244 |
+
The checkout is limited to checking out the files in the index. Files which
|
| 1245 |
+
are not in the index anymore and exist in the working tree will not be
|
| 1246 |
+
deleted. This behaviour is fundamentally different to ``head.checkout``,
|
| 1247 |
+
i.e. if you want :manpage:`git-checkout(1)`-like behaviour, use
|
| 1248 |
+
``head.checkout`` instead of ``index.checkout``.
|
| 1249 |
+
"""
|
| 1250 |
+
args = ["--index"]
|
| 1251 |
+
if force:
|
| 1252 |
+
args.append("--force")
|
| 1253 |
+
|
| 1254 |
+
failed_files = []
|
| 1255 |
+
failed_reasons = []
|
| 1256 |
+
unknown_lines = []
|
| 1257 |
+
|
| 1258 |
+
def handle_stderr(proc: "Popen[bytes]", iter_checked_out_files: Iterable[PathLike]) -> None:
|
| 1259 |
+
stderr_IO = proc.stderr
|
| 1260 |
+
if not stderr_IO:
|
| 1261 |
+
return # Return early if stderr empty.
|
| 1262 |
+
|
| 1263 |
+
stderr_bytes = stderr_IO.read()
|
| 1264 |
+
# line contents:
|
| 1265 |
+
stderr = stderr_bytes.decode(defenc)
|
| 1266 |
+
# git-checkout-index: this already exists
|
| 1267 |
+
endings = (
|
| 1268 |
+
" already exists",
|
| 1269 |
+
" is not in the cache",
|
| 1270 |
+
" does not exist at stage",
|
| 1271 |
+
" is unmerged",
|
| 1272 |
+
)
|
| 1273 |
+
for line in stderr.splitlines():
|
| 1274 |
+
if not line.startswith("git checkout-index: ") and not line.startswith("git-checkout-index: "):
|
| 1275 |
+
is_a_dir = " is a directory"
|
| 1276 |
+
unlink_issue = "unable to unlink old '"
|
| 1277 |
+
already_exists_issue = " already exists, no checkout" # created by entry.c:checkout_entry(...)
|
| 1278 |
+
if line.endswith(is_a_dir):
|
| 1279 |
+
failed_files.append(line[: -len(is_a_dir)])
|
| 1280 |
+
failed_reasons.append(is_a_dir)
|
| 1281 |
+
elif line.startswith(unlink_issue):
|
| 1282 |
+
failed_files.append(line[len(unlink_issue) : line.rfind("'")])
|
| 1283 |
+
failed_reasons.append(unlink_issue)
|
| 1284 |
+
elif line.endswith(already_exists_issue):
|
| 1285 |
+
failed_files.append(line[: -len(already_exists_issue)])
|
| 1286 |
+
failed_reasons.append(already_exists_issue)
|
| 1287 |
+
else:
|
| 1288 |
+
unknown_lines.append(line)
|
| 1289 |
+
continue
|
| 1290 |
+
# END special lines parsing
|
| 1291 |
+
|
| 1292 |
+
for e in endings:
|
| 1293 |
+
if line.endswith(e):
|
| 1294 |
+
failed_files.append(line[20 : -len(e)])
|
| 1295 |
+
failed_reasons.append(e)
|
| 1296 |
+
break
|
| 1297 |
+
# END if ending matches
|
| 1298 |
+
# END for each possible ending
|
| 1299 |
+
# END for each line
|
| 1300 |
+
if unknown_lines:
|
| 1301 |
+
raise GitCommandError(("git-checkout-index",), 128, stderr)
|
| 1302 |
+
if failed_files:
|
| 1303 |
+
valid_files = list(set(iter_checked_out_files) - set(failed_files))
|
| 1304 |
+
raise CheckoutError(
|
| 1305 |
+
"Some files could not be checked out from the index due to local modifications",
|
| 1306 |
+
failed_files,
|
| 1307 |
+
valid_files,
|
| 1308 |
+
failed_reasons,
|
| 1309 |
+
)
|
| 1310 |
+
|
| 1311 |
+
# END stderr handler
|
| 1312 |
+
|
| 1313 |
+
if paths is None:
|
| 1314 |
+
args.append("--all")
|
| 1315 |
+
kwargs["as_process"] = 1
|
| 1316 |
+
fprogress(None, False, None)
|
| 1317 |
+
proc = self.repo.git.checkout_index(*args, **kwargs)
|
| 1318 |
+
proc.wait()
|
| 1319 |
+
fprogress(None, True, None)
|
| 1320 |
+
rval_iter = (e.path for e in self.entries.values())
|
| 1321 |
+
handle_stderr(proc, rval_iter)
|
| 1322 |
+
return rval_iter
|
| 1323 |
+
else:
|
| 1324 |
+
if isinstance(paths, str):
|
| 1325 |
+
paths = [paths]
|
| 1326 |
+
|
| 1327 |
+
# Make sure we have our entries loaded before we start checkout_index, which
|
| 1328 |
+
# will hold a lock on it. We try to get the lock as well during our entries
|
| 1329 |
+
# initialization.
|
| 1330 |
+
self.entries # noqa: B018
|
| 1331 |
+
|
| 1332 |
+
args.append("--stdin")
|
| 1333 |
+
kwargs["as_process"] = True
|
| 1334 |
+
kwargs["istream"] = subprocess.PIPE
|
| 1335 |
+
proc = self.repo.git.checkout_index(args, **kwargs)
|
| 1336 |
+
# FIXME: Reading from GIL!
|
| 1337 |
+
make_exc = lambda: GitCommandError(("git-checkout-index",) + tuple(args), 128, proc.stderr.read())
|
| 1338 |
+
checked_out_files: List[PathLike] = []
|
| 1339 |
+
|
| 1340 |
+
for path in paths:
|
| 1341 |
+
co_path = to_native_path_linux(self._to_relative_path(path))
|
| 1342 |
+
# If the item is not in the index, it could be a directory.
|
| 1343 |
+
path_is_directory = False
|
| 1344 |
+
|
| 1345 |
+
try:
|
| 1346 |
+
self.entries[(co_path, 0)]
|
| 1347 |
+
except KeyError:
|
| 1348 |
+
folder = str(co_path)
|
| 1349 |
+
if not folder.endswith("/"):
|
| 1350 |
+
folder += "/"
|
| 1351 |
+
for entry in self.entries.values():
|
| 1352 |
+
if str(entry.path).startswith(folder):
|
| 1353 |
+
p = entry.path
|
| 1354 |
+
self._write_path_to_stdin(proc, p, p, make_exc, fprogress, read_from_stdout=False)
|
| 1355 |
+
checked_out_files.append(p)
|
| 1356 |
+
path_is_directory = True
|
| 1357 |
+
# END if entry is in directory
|
| 1358 |
+
# END for each entry
|
| 1359 |
+
# END path exception handlnig
|
| 1360 |
+
|
| 1361 |
+
if not path_is_directory:
|
| 1362 |
+
self._write_path_to_stdin(proc, co_path, path, make_exc, fprogress, read_from_stdout=False)
|
| 1363 |
+
checked_out_files.append(co_path)
|
| 1364 |
+
# END path is a file
|
| 1365 |
+
# END for each path
|
| 1366 |
+
try:
|
| 1367 |
+
self._flush_stdin_and_wait(proc, ignore_stdout=True)
|
| 1368 |
+
except GitCommandError:
|
| 1369 |
+
# Without parsing stdout we don't know what failed.
|
| 1370 |
+
raise CheckoutError( # noqa: B904
|
| 1371 |
+
"Some files could not be checked out from the index, probably because they didn't exist.",
|
| 1372 |
+
failed_files,
|
| 1373 |
+
[],
|
| 1374 |
+
failed_reasons,
|
| 1375 |
+
)
|
| 1376 |
+
|
| 1377 |
+
handle_stderr(proc, checked_out_files)
|
| 1378 |
+
return checked_out_files
|
| 1379 |
+
# END paths handling
|
| 1380 |
+
|
| 1381 |
+
@default_index
|
| 1382 |
+
def reset(
|
| 1383 |
+
self,
|
| 1384 |
+
commit: Union[Commit, "Reference", str] = "HEAD",
|
| 1385 |
+
working_tree: bool = False,
|
| 1386 |
+
paths: Union[None, Iterable[PathLike]] = None,
|
| 1387 |
+
head: bool = False,
|
| 1388 |
+
**kwargs: Any,
|
| 1389 |
+
) -> "IndexFile":
|
| 1390 |
+
"""Reset the index to reflect the tree at the given commit. This will not adjust
|
| 1391 |
+
our HEAD reference by default, as opposed to
|
| 1392 |
+
:meth:`HEAD.reset <git.refs.head.HEAD.reset>`.
|
| 1393 |
+
|
| 1394 |
+
:param commit:
|
| 1395 |
+
Revision, :class:`~git.refs.reference.Reference` or
|
| 1396 |
+
:class:`~git.objects.commit.Commit` specifying the commit we should
|
| 1397 |
+
represent.
|
| 1398 |
+
|
| 1399 |
+
If you want to specify a tree only, use :meth:`IndexFile.from_tree` and
|
| 1400 |
+
overwrite the default index.
|
| 1401 |
+
|
| 1402 |
+
:param working_tree:
|
| 1403 |
+
If ``True``, the files in the working tree will reflect the changed index.
|
| 1404 |
+
If ``False``, the working tree will not be touched.
|
| 1405 |
+
Please note that changes to the working copy will be discarded without
|
| 1406 |
+
warning!
|
| 1407 |
+
|
| 1408 |
+
:param head:
|
| 1409 |
+
If ``True``, the head will be set to the given commit. This is ``False`` by
|
| 1410 |
+
default, but if ``True``, this method behaves like
|
| 1411 |
+
:meth:`HEAD.reset <git.refs.head.HEAD.reset>`.
|
| 1412 |
+
|
| 1413 |
+
:param paths:
|
| 1414 |
+
If given as an iterable of absolute or repository-relative paths, only these
|
| 1415 |
+
will be reset to their state at the given commit-ish.
|
| 1416 |
+
The paths need to exist at the commit, otherwise an exception will be
|
| 1417 |
+
raised.
|
| 1418 |
+
|
| 1419 |
+
:param kwargs:
|
| 1420 |
+
Additional keyword arguments passed to :manpage:`git-reset(1)`.
|
| 1421 |
+
|
| 1422 |
+
:note:
|
| 1423 |
+
:meth:`IndexFile.reset`, as opposed to
|
| 1424 |
+
:meth:`HEAD.reset <git.refs.head.HEAD.reset>`, will not delete any files in
|
| 1425 |
+
order to maintain a consistent working tree. Instead, it will just check out
|
| 1426 |
+
the files according to their state in the index.
|
| 1427 |
+
If you want :manpage:`git-reset(1)`-like behaviour, use
|
| 1428 |
+
:meth:`HEAD.reset <git.refs.head.HEAD.reset>` instead.
|
| 1429 |
+
|
| 1430 |
+
:return:
|
| 1431 |
+
self
|
| 1432 |
+
"""
|
| 1433 |
+
# What we actually want to do is to merge the tree into our existing index,
|
| 1434 |
+
# which is what git-read-tree does.
|
| 1435 |
+
new_inst = type(self).from_tree(self.repo, commit)
|
| 1436 |
+
if not paths:
|
| 1437 |
+
self.entries = new_inst.entries
|
| 1438 |
+
else:
|
| 1439 |
+
nie = new_inst.entries
|
| 1440 |
+
for path in paths:
|
| 1441 |
+
path = self._to_relative_path(path)
|
| 1442 |
+
try:
|
| 1443 |
+
key = entry_key(path, 0)
|
| 1444 |
+
self.entries[key] = nie[key]
|
| 1445 |
+
except KeyError:
|
| 1446 |
+
# If key is not in theirs, it musn't be in ours.
|
| 1447 |
+
try:
|
| 1448 |
+
del self.entries[key]
|
| 1449 |
+
except KeyError:
|
| 1450 |
+
pass
|
| 1451 |
+
# END handle deletion keyerror
|
| 1452 |
+
# END handle keyerror
|
| 1453 |
+
# END for each path
|
| 1454 |
+
# END handle paths
|
| 1455 |
+
self.write()
|
| 1456 |
+
|
| 1457 |
+
if working_tree:
|
| 1458 |
+
self.checkout(paths=paths, force=True)
|
| 1459 |
+
# END handle working tree
|
| 1460 |
+
|
| 1461 |
+
if head:
|
| 1462 |
+
self.repo.head.set_commit(self.repo.commit(commit), logmsg="%s: Updating HEAD" % commit)
|
| 1463 |
+
# END handle head change
|
| 1464 |
+
|
| 1465 |
+
return self
|
| 1466 |
+
|
| 1467 |
+
# FIXME: This is documented to accept the same parameters as Diffable.diff, but this
|
| 1468 |
+
# does not handle NULL_TREE for `other`. (The suppressed mypy error is about this.)
|
| 1469 |
+
def diff(
|
| 1470 |
+
self,
|
| 1471 |
+
other: Union[ # type: ignore[override]
|
| 1472 |
+
Literal[git_diff.DiffConstants.INDEX],
|
| 1473 |
+
"Tree",
|
| 1474 |
+
"Commit",
|
| 1475 |
+
str,
|
| 1476 |
+
None,
|
| 1477 |
+
] = git_diff.INDEX,
|
| 1478 |
+
paths: Union[PathLike, List[PathLike], Tuple[PathLike, ...], None] = None,
|
| 1479 |
+
create_patch: bool = False,
|
| 1480 |
+
**kwargs: Any,
|
| 1481 |
+
) -> git_diff.DiffIndex:
|
| 1482 |
+
"""Diff this index against the working copy or a :class:`~git.objects.tree.Tree`
|
| 1483 |
+
or :class:`~git.objects.commit.Commit` object.
|
| 1484 |
+
|
| 1485 |
+
For documentation of the parameters and return values, see
|
| 1486 |
+
:meth:`Diffable.diff <git.diff.Diffable.diff>`.
|
| 1487 |
+
|
| 1488 |
+
:note:
|
| 1489 |
+
Will only work with indices that represent the default git index as they
|
| 1490 |
+
have not been initialized with a stream.
|
| 1491 |
+
"""
|
| 1492 |
+
# Only run if we are the default repository index.
|
| 1493 |
+
if self._file_path != self._index_path():
|
| 1494 |
+
raise AssertionError("Cannot call %r on indices that do not represent the default git index" % self.diff())
|
| 1495 |
+
# Index against index is always empty.
|
| 1496 |
+
if other is self.INDEX:
|
| 1497 |
+
return git_diff.DiffIndex()
|
| 1498 |
+
|
| 1499 |
+
# Index against anything but None is a reverse diff with the respective item.
|
| 1500 |
+
# Handle existing -R flags properly.
|
| 1501 |
+
# Transform strings to the object so that we can call diff on it.
|
| 1502 |
+
if isinstance(other, str):
|
| 1503 |
+
other = self.repo.rev_parse(other)
|
| 1504 |
+
# END object conversion
|
| 1505 |
+
|
| 1506 |
+
if isinstance(other, Object): # For Tree or Commit.
|
| 1507 |
+
# Invert the existing R flag.
|
| 1508 |
+
cur_val = kwargs.get("R", False)
|
| 1509 |
+
kwargs["R"] = not cur_val
|
| 1510 |
+
return other.diff(self.INDEX, paths, create_patch, **kwargs)
|
| 1511 |
+
# END diff against other item handling
|
| 1512 |
+
|
| 1513 |
+
# If other is not None here, something is wrong.
|
| 1514 |
+
if other is not None:
|
| 1515 |
+
raise ValueError("other must be None, Diffable.INDEX, a Tree or Commit, was %r" % other)
|
| 1516 |
+
|
| 1517 |
+
# Diff against working copy - can be handled by superclass natively.
|
| 1518 |
+
return super().diff(other, paths, create_patch, **kwargs)
|
parrot/lib/python3.10/site-packages/git/index/util.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This module is part of GitPython and is released under the
|
| 2 |
+
# 3-Clause BSD License: https://opensource.org/license/bsd-3-clause/
|
| 3 |
+
|
| 4 |
+
"""Index utilities."""
|
| 5 |
+
|
| 6 |
+
__all__ = ["TemporaryFileSwap", "post_clear_cache", "default_index", "git_working_dir"]
|
| 7 |
+
|
| 8 |
+
import contextlib
|
| 9 |
+
from functools import wraps
|
| 10 |
+
import os
|
| 11 |
+
import os.path as osp
|
| 12 |
+
import struct
|
| 13 |
+
import tempfile
|
| 14 |
+
from types import TracebackType
|
| 15 |
+
|
| 16 |
+
# typing ----------------------------------------------------------------------
|
| 17 |
+
|
| 18 |
+
from typing import Any, Callable, TYPE_CHECKING, Optional, Type
|
| 19 |
+
|
| 20 |
+
from git.types import Literal, PathLike, _T
|
| 21 |
+
|
| 22 |
+
if TYPE_CHECKING:
|
| 23 |
+
from git.index import IndexFile
|
| 24 |
+
|
| 25 |
+
# ---------------------------------------------------------------------------------
|
| 26 |
+
|
| 27 |
+
# { Aliases
|
| 28 |
+
pack = struct.pack
|
| 29 |
+
unpack = struct.unpack
|
| 30 |
+
# } END aliases
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class TemporaryFileSwap:
|
| 34 |
+
"""Utility class moving a file to a temporary location within the same directory and
|
| 35 |
+
moving it back on to where on object deletion."""
|
| 36 |
+
|
| 37 |
+
__slots__ = ("file_path", "tmp_file_path")
|
| 38 |
+
|
| 39 |
+
def __init__(self, file_path: PathLike) -> None:
|
| 40 |
+
self.file_path = file_path
|
| 41 |
+
dirname, basename = osp.split(file_path)
|
| 42 |
+
fd, self.tmp_file_path = tempfile.mkstemp(prefix=basename, dir=dirname)
|
| 43 |
+
os.close(fd)
|
| 44 |
+
with contextlib.suppress(OSError): # It may be that the source does not exist.
|
| 45 |
+
os.replace(self.file_path, self.tmp_file_path)
|
| 46 |
+
|
| 47 |
+
def __enter__(self) -> "TemporaryFileSwap":
|
| 48 |
+
return self
|
| 49 |
+
|
| 50 |
+
def __exit__(
|
| 51 |
+
self,
|
| 52 |
+
exc_type: Optional[Type[BaseException]],
|
| 53 |
+
exc_val: Optional[BaseException],
|
| 54 |
+
exc_tb: Optional[TracebackType],
|
| 55 |
+
) -> Literal[False]:
|
| 56 |
+
if osp.isfile(self.tmp_file_path):
|
| 57 |
+
os.replace(self.tmp_file_path, self.file_path)
|
| 58 |
+
return False
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
# { Decorators
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def post_clear_cache(func: Callable[..., _T]) -> Callable[..., _T]:
|
| 65 |
+
"""Decorator for functions that alter the index using the git command.
|
| 66 |
+
|
| 67 |
+
When a git command alters the index, this invalidates our possibly existing entries
|
| 68 |
+
dictionary, which is why it must be deleted to allow it to be lazily reread later.
|
| 69 |
+
"""
|
| 70 |
+
|
| 71 |
+
@wraps(func)
|
| 72 |
+
def post_clear_cache_if_not_raised(self: "IndexFile", *args: Any, **kwargs: Any) -> _T:
|
| 73 |
+
rval = func(self, *args, **kwargs)
|
| 74 |
+
self._delete_entries_cache()
|
| 75 |
+
return rval
|
| 76 |
+
|
| 77 |
+
# END wrapper method
|
| 78 |
+
|
| 79 |
+
return post_clear_cache_if_not_raised
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def default_index(func: Callable[..., _T]) -> Callable[..., _T]:
|
| 83 |
+
"""Decorator ensuring the wrapped method may only run if we are the default
|
| 84 |
+
repository index.
|
| 85 |
+
|
| 86 |
+
This is as we rely on git commands that operate on that index only.
|
| 87 |
+
"""
|
| 88 |
+
|
| 89 |
+
@wraps(func)
|
| 90 |
+
def check_default_index(self: "IndexFile", *args: Any, **kwargs: Any) -> _T:
|
| 91 |
+
if self._file_path != self._index_path():
|
| 92 |
+
raise AssertionError(
|
| 93 |
+
"Cannot call %r on indices that do not represent the default git index" % func.__name__
|
| 94 |
+
)
|
| 95 |
+
return func(self, *args, **kwargs)
|
| 96 |
+
|
| 97 |
+
# END wrapper method
|
| 98 |
+
|
| 99 |
+
return check_default_index
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def git_working_dir(func: Callable[..., _T]) -> Callable[..., _T]:
|
| 103 |
+
"""Decorator which changes the current working dir to the one of the git
|
| 104 |
+
repository in order to ensure relative paths are handled correctly."""
|
| 105 |
+
|
| 106 |
+
@wraps(func)
|
| 107 |
+
def set_git_working_dir(self: "IndexFile", *args: Any, **kwargs: Any) -> _T:
|
| 108 |
+
cur_wd = os.getcwd()
|
| 109 |
+
os.chdir(str(self.repo.working_tree_dir))
|
| 110 |
+
try:
|
| 111 |
+
return func(self, *args, **kwargs)
|
| 112 |
+
finally:
|
| 113 |
+
os.chdir(cur_wd)
|
| 114 |
+
# END handle working dir
|
| 115 |
+
|
| 116 |
+
# END wrapper
|
| 117 |
+
|
| 118 |
+
return set_git_working_dir
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
# } END decorators
|
parrot/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:757403bb67f240cd69d8ba9279c1a9dc974b8549eb1372c7f8ae63b5549e88c6
|
| 3 |
+
size 135486
|
parrot/lib/python3.10/site-packages/imageio/__init__.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# Copyright (c) 2014-2020, imageio contributors
|
| 3 |
+
# imageio is distributed under the terms of the (new) BSD License.
|
| 4 |
+
|
| 5 |
+
# This docstring is used at the index of the documentation pages, and
|
| 6 |
+
# gets inserted into a slightly larger description (in setup.py) for
|
| 7 |
+
# the page on Pypi:
|
| 8 |
+
"""
|
| 9 |
+
Imageio is a Python library that provides an easy interface to read and
|
| 10 |
+
write a wide range of image data, including animated images, volumetric
|
| 11 |
+
data, and scientific formats. It is cross-platform, runs on Python 3.5+,
|
| 12 |
+
and is easy to install.
|
| 13 |
+
|
| 14 |
+
Main website: https://imageio.readthedocs.io/
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
# flake8: noqa
|
| 18 |
+
|
| 19 |
+
__version__ = "2.35.1"
|
| 20 |
+
|
| 21 |
+
import warnings
|
| 22 |
+
|
| 23 |
+
# Load some bits from core
|
| 24 |
+
from .core import FormatManager, RETURN_BYTES
|
| 25 |
+
|
| 26 |
+
# Instantiate the old format manager
|
| 27 |
+
formats = FormatManager()
|
| 28 |
+
show_formats = formats.show
|
| 29 |
+
|
| 30 |
+
from . import v2
|
| 31 |
+
from . import v3
|
| 32 |
+
from . import plugins
|
| 33 |
+
|
| 34 |
+
# import config after core to avoid circular import
|
| 35 |
+
from . import config
|
| 36 |
+
|
| 37 |
+
# import all APIs into the top level (meta API)
|
| 38 |
+
from .v2 import (
|
| 39 |
+
imread as imread_v2,
|
| 40 |
+
mimread,
|
| 41 |
+
volread,
|
| 42 |
+
mvolread,
|
| 43 |
+
imwrite,
|
| 44 |
+
mimwrite,
|
| 45 |
+
volwrite,
|
| 46 |
+
mvolwrite,
|
| 47 |
+
# aliases
|
| 48 |
+
get_reader as read,
|
| 49 |
+
get_writer as save,
|
| 50 |
+
imwrite as imsave,
|
| 51 |
+
mimwrite as mimsave,
|
| 52 |
+
volwrite as volsave,
|
| 53 |
+
mvolwrite as mvolsave,
|
| 54 |
+
# misc
|
| 55 |
+
help,
|
| 56 |
+
get_reader,
|
| 57 |
+
get_writer,
|
| 58 |
+
)
|
| 59 |
+
from .v3 import (
|
| 60 |
+
imopen,
|
| 61 |
+
# imread, # Will take over once v3 is released
|
| 62 |
+
# imwrite, # Will take over once v3 is released
|
| 63 |
+
imiter,
|
| 64 |
+
)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def imread(uri, format=None, **kwargs):
|
| 68 |
+
"""imread(uri, format=None, **kwargs)
|
| 69 |
+
|
| 70 |
+
Reads an image from the specified file. Returns a numpy array, which
|
| 71 |
+
comes with a dict of meta data at its 'meta' attribute.
|
| 72 |
+
|
| 73 |
+
Note that the image data is returned as-is, and may not always have
|
| 74 |
+
a dtype of uint8 (and thus may differ from what e.g. PIL returns).
|
| 75 |
+
|
| 76 |
+
Parameters
|
| 77 |
+
----------
|
| 78 |
+
uri : {str, pathlib.Path, bytes, file}
|
| 79 |
+
The resource to load the image from, e.g. a filename, pathlib.Path,
|
| 80 |
+
http address or file object, see the docs for more info.
|
| 81 |
+
format : str
|
| 82 |
+
The format to use to read the file. By default imageio selects
|
| 83 |
+
the appropriate for you based on the filename and its contents.
|
| 84 |
+
kwargs : ...
|
| 85 |
+
Further keyword arguments are passed to the reader. See :func:`.help`
|
| 86 |
+
to see what arguments are available for a particular format.
|
| 87 |
+
"""
|
| 88 |
+
|
| 89 |
+
warnings.warn(
|
| 90 |
+
"Starting with ImageIO v3 the behavior of this function will switch to that of"
|
| 91 |
+
" iio.v3.imread. To keep the current behavior (and make this warning disappear)"
|
| 92 |
+
" use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly.",
|
| 93 |
+
DeprecationWarning,
|
| 94 |
+
stacklevel=2,
|
| 95 |
+
)
|
| 96 |
+
|
| 97 |
+
return imread_v2(uri, format=format, **kwargs)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
__all__ = [
|
| 101 |
+
"v2",
|
| 102 |
+
"v3",
|
| 103 |
+
"config",
|
| 104 |
+
"plugins",
|
| 105 |
+
# v3 API
|
| 106 |
+
"imopen",
|
| 107 |
+
"imread",
|
| 108 |
+
"imwrite",
|
| 109 |
+
"imiter",
|
| 110 |
+
# v2 API
|
| 111 |
+
"mimread",
|
| 112 |
+
"volread",
|
| 113 |
+
"mvolread",
|
| 114 |
+
"imwrite",
|
| 115 |
+
"mimwrite",
|
| 116 |
+
"volwrite",
|
| 117 |
+
"mvolwrite",
|
| 118 |
+
# v2 aliases
|
| 119 |
+
"read",
|
| 120 |
+
"save",
|
| 121 |
+
"imsave",
|
| 122 |
+
"mimsave",
|
| 123 |
+
"volsave",
|
| 124 |
+
"mvolsave",
|
| 125 |
+
# functions to deprecate
|
| 126 |
+
"help",
|
| 127 |
+
"get_reader",
|
| 128 |
+
"get_writer",
|
| 129 |
+
"formats",
|
| 130 |
+
"show_formats",
|
| 131 |
+
]
|
parrot/lib/python3.10/site-packages/imageio/__main__.py
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Console scripts and associated helper methods for imageio.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import argparse
|
| 6 |
+
import os
|
| 7 |
+
from os import path as op
|
| 8 |
+
import shutil
|
| 9 |
+
import sys
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
from . import plugins
|
| 13 |
+
from .core import util
|
| 14 |
+
|
| 15 |
+
# A list of plugins that require binaries from the imageio-binaries
|
| 16 |
+
# repository. These plugins must implement the `download` method.
|
| 17 |
+
PLUGINS_WITH_BINARIES = ["freeimage"]
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def download_bin(plugin_names=["all"], package_dir=False):
|
| 21 |
+
"""Download binary dependencies of plugins
|
| 22 |
+
|
| 23 |
+
This is a convenience method for downloading the binaries
|
| 24 |
+
(e.g. for freeimage) from the imageio-binaries
|
| 25 |
+
repository.
|
| 26 |
+
|
| 27 |
+
Parameters
|
| 28 |
+
----------
|
| 29 |
+
plugin_names: list
|
| 30 |
+
A list of imageio plugin names. If it contains "all", all
|
| 31 |
+
binary dependencies are downloaded.
|
| 32 |
+
package_dir: bool
|
| 33 |
+
If set to `True`, the binaries will be downloaded to the
|
| 34 |
+
`resources` directory of the imageio package instead of
|
| 35 |
+
to the users application data directory. Note that this
|
| 36 |
+
might require administrative rights if imageio is installed
|
| 37 |
+
in a system directory.
|
| 38 |
+
"""
|
| 39 |
+
if plugin_names.count("all"):
|
| 40 |
+
# Use all plugins
|
| 41 |
+
plugin_names = PLUGINS_WITH_BINARIES
|
| 42 |
+
|
| 43 |
+
plugin_names.sort()
|
| 44 |
+
print("Ascertaining binaries for: {}.".format(", ".join(plugin_names)))
|
| 45 |
+
|
| 46 |
+
if package_dir:
|
| 47 |
+
# Download the binaries to the `resources` directory
|
| 48 |
+
# of imageio. If imageio comes as an .egg, then a cache
|
| 49 |
+
# directory will be created by pkg_resources (requires setuptools).
|
| 50 |
+
# see `imageio.core.util.resource_dirs`
|
| 51 |
+
# and `imageio.core.utilresource_package_dir`
|
| 52 |
+
directory = util.resource_package_dir()
|
| 53 |
+
else:
|
| 54 |
+
directory = None
|
| 55 |
+
|
| 56 |
+
for plg in plugin_names:
|
| 57 |
+
if plg not in PLUGINS_WITH_BINARIES:
|
| 58 |
+
msg = "Plugin {} not registered for binary download!".format(plg)
|
| 59 |
+
raise Exception(msg)
|
| 60 |
+
mod = getattr(plugins, plg)
|
| 61 |
+
mod.download(directory=directory)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def download_bin_main():
|
| 65 |
+
"""Argument-parsing wrapper for `download_bin`"""
|
| 66 |
+
description = "Download plugin binary dependencies"
|
| 67 |
+
phelp = (
|
| 68 |
+
"Plugin name for which to download the binary. "
|
| 69 |
+
+ "If no argument is given, all binaries are downloaded."
|
| 70 |
+
)
|
| 71 |
+
dhelp = (
|
| 72 |
+
"Download the binaries to the package directory "
|
| 73 |
+
+ "(default is the users application data directory). "
|
| 74 |
+
+ "This might require administrative rights."
|
| 75 |
+
)
|
| 76 |
+
example_text = (
|
| 77 |
+
"examples:\n"
|
| 78 |
+
+ " imageio_download_bin all\n"
|
| 79 |
+
+ " imageio_download_bin freeimage\n"
|
| 80 |
+
)
|
| 81 |
+
parser = argparse.ArgumentParser(
|
| 82 |
+
description=description,
|
| 83 |
+
epilog=example_text,
|
| 84 |
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
| 85 |
+
)
|
| 86 |
+
parser.add_argument("plugin", type=str, nargs="*", default="all", help=phelp)
|
| 87 |
+
parser.add_argument(
|
| 88 |
+
"--package-dir",
|
| 89 |
+
dest="package_dir",
|
| 90 |
+
action="store_true",
|
| 91 |
+
default=False,
|
| 92 |
+
help=dhelp,
|
| 93 |
+
)
|
| 94 |
+
args = parser.parse_args()
|
| 95 |
+
download_bin(plugin_names=args.plugin, package_dir=args.package_dir)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def remove_bin(plugin_names=["all"]):
|
| 99 |
+
"""Remove binary dependencies of plugins
|
| 100 |
+
|
| 101 |
+
This is a convenience method that removes all binaries
|
| 102 |
+
dependencies for plugins downloaded by imageio.
|
| 103 |
+
|
| 104 |
+
Notes
|
| 105 |
+
-----
|
| 106 |
+
It only makes sense to use this method if the binaries
|
| 107 |
+
are corrupt.
|
| 108 |
+
"""
|
| 109 |
+
if plugin_names.count("all"):
|
| 110 |
+
# Use all plugins
|
| 111 |
+
plugin_names = PLUGINS_WITH_BINARIES
|
| 112 |
+
|
| 113 |
+
print("Removing binaries for: {}.".format(", ".join(plugin_names)))
|
| 114 |
+
|
| 115 |
+
rdirs = util.resource_dirs()
|
| 116 |
+
|
| 117 |
+
for plg in plugin_names:
|
| 118 |
+
if plg not in PLUGINS_WITH_BINARIES:
|
| 119 |
+
msg = "Plugin {} not registered for binary download!".format(plg)
|
| 120 |
+
raise Exception(msg)
|
| 121 |
+
|
| 122 |
+
not_removed = []
|
| 123 |
+
for rd in rdirs:
|
| 124 |
+
# plugin name is in subdirectories
|
| 125 |
+
for rsub in os.listdir(rd):
|
| 126 |
+
if rsub in plugin_names:
|
| 127 |
+
plgdir = op.join(rd, rsub)
|
| 128 |
+
try:
|
| 129 |
+
shutil.rmtree(plgdir)
|
| 130 |
+
except Exception:
|
| 131 |
+
not_removed.append(plgdir)
|
| 132 |
+
if not_removed:
|
| 133 |
+
nrs = ",".join(not_removed)
|
| 134 |
+
msg2 = (
|
| 135 |
+
"These plugins files could not be removed: {}\n".format(nrs)
|
| 136 |
+
+ "Make sure they are not used by any program and try again."
|
| 137 |
+
)
|
| 138 |
+
raise Exception(msg2)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def remove_bin_main():
|
| 142 |
+
"""Argument-parsing wrapper for `remove_bin`"""
|
| 143 |
+
description = "Remove plugin binary dependencies"
|
| 144 |
+
phelp = (
|
| 145 |
+
"Plugin name for which to remove the binary. "
|
| 146 |
+
+ "If no argument is given, all binaries are removed."
|
| 147 |
+
)
|
| 148 |
+
example_text = (
|
| 149 |
+
"examples:\n"
|
| 150 |
+
+ " imageio_remove_bin all\n"
|
| 151 |
+
+ " imageio_remove_bin freeimage\n"
|
| 152 |
+
)
|
| 153 |
+
parser = argparse.ArgumentParser(
|
| 154 |
+
description=description,
|
| 155 |
+
epilog=example_text,
|
| 156 |
+
formatter_class=argparse.RawDescriptionHelpFormatter,
|
| 157 |
+
)
|
| 158 |
+
parser.add_argument("plugin", type=str, nargs="*", default="all", help=phelp)
|
| 159 |
+
args = parser.parse_args()
|
| 160 |
+
remove_bin(plugin_names=args.plugin)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
if __name__ == "__main__":
|
| 164 |
+
if len(sys.argv) > 1 and sys.argv[1] == "download_bin":
|
| 165 |
+
download_bin_main()
|
| 166 |
+
elif len(sys.argv) > 1 and sys.argv[1] == "remove_bin":
|
| 167 |
+
remove_bin_main()
|
| 168 |
+
else:
|
| 169 |
+
raise RuntimeError("Invalid use of the imageio CLI")
|
parrot/lib/python3.10/site-packages/imageio/config/__init__.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .extensions import (
|
| 2 |
+
extension_list,
|
| 3 |
+
known_extensions,
|
| 4 |
+
FileExtension,
|
| 5 |
+
video_extensions,
|
| 6 |
+
)
|
| 7 |
+
from .plugins import known_plugins, PluginConfig
|
| 8 |
+
|
| 9 |
+
__all__ = [
|
| 10 |
+
"known_plugins",
|
| 11 |
+
"PluginConfig",
|
| 12 |
+
"extension_list",
|
| 13 |
+
"known_extensions",
|
| 14 |
+
"FileExtension",
|
| 15 |
+
"video_extensions",
|
| 16 |
+
]
|
parrot/lib/python3.10/site-packages/imageio/config/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (412 Bytes). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/config/__pycache__/extensions.cpython-310.pyc
ADDED
|
Binary file (32.5 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/config/__pycache__/plugins.cpython-310.pyc
ADDED
|
Binary file (10.9 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/config/extensions.py
ADDED
|
@@ -0,0 +1,2002 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
A set of objects representing each file extension recognized by ImageIO. If an
|
| 3 |
+
extension is not listed here it is still supported, as long as there exists a
|
| 4 |
+
supporting backend.
|
| 5 |
+
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class FileExtension:
|
| 10 |
+
"""File Extension Metadata
|
| 11 |
+
|
| 12 |
+
This class holds information about a image file format associated with a
|
| 13 |
+
given extension. This information is used to track plugins that are known to
|
| 14 |
+
be able to handle a particular format. It also contains additional
|
| 15 |
+
information about a format, which is used when creating the supported format
|
| 16 |
+
docs.
|
| 17 |
+
|
| 18 |
+
Plugins known to be able to handle this format are ordered by a ``priority``
|
| 19 |
+
list. This list is used to determine the ideal plugin to use when choosing a
|
| 20 |
+
plugin based on file extension.
|
| 21 |
+
|
| 22 |
+
Parameters
|
| 23 |
+
----------
|
| 24 |
+
extension : str
|
| 25 |
+
The name of the extension including the initial dot, e.g. ".png".
|
| 26 |
+
priority : List
|
| 27 |
+
A list of plugin names (entries in config.known_plugins) that can handle
|
| 28 |
+
this format. The position of a plugin expresses a preference, e.g.
|
| 29 |
+
["plugin1", "plugin2"] indicates that, if available, plugin1 should be
|
| 30 |
+
preferred over plugin2 when handling a request related to this format.
|
| 31 |
+
name : str
|
| 32 |
+
The full name of the format.
|
| 33 |
+
description : str
|
| 34 |
+
A description of the format.
|
| 35 |
+
external_link : str
|
| 36 |
+
A link to further information about the format. Typically, the format's
|
| 37 |
+
specification.
|
| 38 |
+
volume_support : str
|
| 39 |
+
If True, the format/extension supports volumetric image data.
|
| 40 |
+
|
| 41 |
+
Examples
|
| 42 |
+
--------
|
| 43 |
+
>>> FileExtension(
|
| 44 |
+
name="Bitmap",
|
| 45 |
+
extension=".bmp",
|
| 46 |
+
priority=["pillow", "BMP-PIL", "BMP-FI", "ITK"],
|
| 47 |
+
external_link="https://en.wikipedia.org/wiki/BMP_file_format",
|
| 48 |
+
)
|
| 49 |
+
|
| 50 |
+
"""
|
| 51 |
+
|
| 52 |
+
def __init__(
|
| 53 |
+
self,
|
| 54 |
+
*,
|
| 55 |
+
extension,
|
| 56 |
+
priority,
|
| 57 |
+
name=None,
|
| 58 |
+
description=None,
|
| 59 |
+
external_link=None,
|
| 60 |
+
volume_support=False
|
| 61 |
+
):
|
| 62 |
+
self.extension = extension
|
| 63 |
+
self.priority = priority
|
| 64 |
+
self.name = name
|
| 65 |
+
self.description = description
|
| 66 |
+
self.external_link = external_link
|
| 67 |
+
self.default_priority = priority.copy()
|
| 68 |
+
self.volume_support = volume_support
|
| 69 |
+
|
| 70 |
+
def reset(self):
|
| 71 |
+
self.priority = self.default_priority.copy()
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
extension_list = [
|
| 75 |
+
FileExtension(
|
| 76 |
+
name="Hasselblad raw",
|
| 77 |
+
extension=".3fr",
|
| 78 |
+
priority=["RAW-FI"],
|
| 79 |
+
),
|
| 80 |
+
FileExtension(
|
| 81 |
+
name="Sony alpha",
|
| 82 |
+
extension=".arw",
|
| 83 |
+
priority=["RAW-FI"],
|
| 84 |
+
),
|
| 85 |
+
FileExtension(
|
| 86 |
+
name="Animated Portable Network Graphics",
|
| 87 |
+
external_link="https://en.wikipedia.org/wiki/APNG",
|
| 88 |
+
extension=".apng",
|
| 89 |
+
priority=["pillow", "pyav"],
|
| 90 |
+
),
|
| 91 |
+
FileExtension(
|
| 92 |
+
name="Audio Video Interleave",
|
| 93 |
+
extension=".avi",
|
| 94 |
+
priority=["FFMPEG"],
|
| 95 |
+
),
|
| 96 |
+
FileExtension(
|
| 97 |
+
name="Casio raw format",
|
| 98 |
+
extension=".bay",
|
| 99 |
+
priority=["RAW-FI"],
|
| 100 |
+
),
|
| 101 |
+
FileExtension(
|
| 102 |
+
extension=".blp",
|
| 103 |
+
priority=["pillow"],
|
| 104 |
+
),
|
| 105 |
+
FileExtension(
|
| 106 |
+
name="Bitmap",
|
| 107 |
+
extension=".bmp",
|
| 108 |
+
priority=["pillow", "BMP-PIL", "BMP-FI", "ITK", "pyav", "opencv"],
|
| 109 |
+
external_link="https://en.wikipedia.org/wiki/BMP_file_format",
|
| 110 |
+
),
|
| 111 |
+
FileExtension(
|
| 112 |
+
name="Device-Independent Bitmap",
|
| 113 |
+
extension=".dip",
|
| 114 |
+
priority=["opencv"],
|
| 115 |
+
external_link="https://en.wikipedia.org/wiki/BMP_file_format",
|
| 116 |
+
),
|
| 117 |
+
FileExtension(
|
| 118 |
+
name="Re-Volt mipmap",
|
| 119 |
+
extension=".bmq",
|
| 120 |
+
priority=["RAW-FI"],
|
| 121 |
+
),
|
| 122 |
+
FileExtension(
|
| 123 |
+
name="Binary Structured Data Format",
|
| 124 |
+
extension=".bsdf",
|
| 125 |
+
priority=["BSDF"],
|
| 126 |
+
external_link="http://bsdf.io/",
|
| 127 |
+
),
|
| 128 |
+
FileExtension(
|
| 129 |
+
name="Binary Universal Form for the Representation of meteorological data",
|
| 130 |
+
extension=".bufr",
|
| 131 |
+
priority=["pillow", "BUFR-PIL"],
|
| 132 |
+
),
|
| 133 |
+
FileExtension(
|
| 134 |
+
name="Silicon Graphics Image",
|
| 135 |
+
extension=".bw",
|
| 136 |
+
priority=["pillow", "SGI-PIL", "SGI-FI"],
|
| 137 |
+
),
|
| 138 |
+
FileExtension(
|
| 139 |
+
name="Scirra Construct",
|
| 140 |
+
extension=".cap",
|
| 141 |
+
priority=["RAW-FI"],
|
| 142 |
+
),
|
| 143 |
+
FileExtension(
|
| 144 |
+
name="AMETEK High Speed Camera Format",
|
| 145 |
+
extension=".cine",
|
| 146 |
+
priority=["RAW-FI"],
|
| 147 |
+
external_link="https://phantomhighspeed-knowledge.secure.force.com/servlet/fileField?id=0BE1N000000kD2i#:~:text=Cine%20is%20a%20video%20file,camera%20model%20and%20image%20resolution",
|
| 148 |
+
),
|
| 149 |
+
FileExtension(extension=".cr2", priority=["RAW-FI"]),
|
| 150 |
+
FileExtension(
|
| 151 |
+
extension=".crw",
|
| 152 |
+
priority=["RAW-FI"],
|
| 153 |
+
),
|
| 154 |
+
FileExtension(
|
| 155 |
+
extension=".cs1",
|
| 156 |
+
priority=["RAW-FI"],
|
| 157 |
+
),
|
| 158 |
+
FileExtension(
|
| 159 |
+
name="Computerized Tomography",
|
| 160 |
+
extension=".ct",
|
| 161 |
+
priority=["DICOM"],
|
| 162 |
+
),
|
| 163 |
+
FileExtension(
|
| 164 |
+
name="Windows Cursor Icons",
|
| 165 |
+
extension=".cur",
|
| 166 |
+
priority=["pillow", "CUR-PIL"],
|
| 167 |
+
),
|
| 168 |
+
FileExtension(
|
| 169 |
+
name="Dr. Halo",
|
| 170 |
+
extension=".cut",
|
| 171 |
+
priority=["CUT-FI"],
|
| 172 |
+
),
|
| 173 |
+
FileExtension(
|
| 174 |
+
extension=".dc2",
|
| 175 |
+
priority=["RAW-FI"],
|
| 176 |
+
),
|
| 177 |
+
FileExtension(
|
| 178 |
+
name="DICOM file format",
|
| 179 |
+
extension=".dcm",
|
| 180 |
+
priority=["DICOM", "ITK"],
|
| 181 |
+
),
|
| 182 |
+
FileExtension(
|
| 183 |
+
extension=".dcr",
|
| 184 |
+
priority=["RAW-FI"],
|
| 185 |
+
),
|
| 186 |
+
FileExtension(
|
| 187 |
+
name="Intel DCX",
|
| 188 |
+
extension=".dcx",
|
| 189 |
+
priority=["pillow", "DCX-PIL"],
|
| 190 |
+
),
|
| 191 |
+
FileExtension(
|
| 192 |
+
name="DirectX Texture Container",
|
| 193 |
+
extension=".dds",
|
| 194 |
+
priority=["pillow", "DDS-FI", "DDS-PIL"],
|
| 195 |
+
),
|
| 196 |
+
FileExtension(
|
| 197 |
+
name="Windows Bitmap",
|
| 198 |
+
extension=".dib",
|
| 199 |
+
priority=["pillow", "DIB-PIL"],
|
| 200 |
+
),
|
| 201 |
+
FileExtension(
|
| 202 |
+
name="DICOM file format",
|
| 203 |
+
extension=".dicom",
|
| 204 |
+
priority=["ITK"],
|
| 205 |
+
),
|
| 206 |
+
FileExtension(
|
| 207 |
+
extension=".dng",
|
| 208 |
+
priority=["RAW-FI"],
|
| 209 |
+
),
|
| 210 |
+
FileExtension(
|
| 211 |
+
extension=".drf",
|
| 212 |
+
priority=["RAW-FI"],
|
| 213 |
+
),
|
| 214 |
+
FileExtension(
|
| 215 |
+
extension=".dsc",
|
| 216 |
+
priority=["RAW-FI"],
|
| 217 |
+
),
|
| 218 |
+
FileExtension(
|
| 219 |
+
name="Enhanced Compression Wavelet",
|
| 220 |
+
extension=".ecw",
|
| 221 |
+
priority=["GDAL"],
|
| 222 |
+
),
|
| 223 |
+
FileExtension(
|
| 224 |
+
name="Windows Metafile",
|
| 225 |
+
extension=".emf",
|
| 226 |
+
priority=["pillow", "WMF-PIL"],
|
| 227 |
+
),
|
| 228 |
+
FileExtension(
|
| 229 |
+
name="Encapsulated Postscript",
|
| 230 |
+
extension=".eps",
|
| 231 |
+
priority=["pillow", "EPS-PIL"],
|
| 232 |
+
),
|
| 233 |
+
FileExtension(
|
| 234 |
+
extension=".erf",
|
| 235 |
+
priority=["RAW-FI"],
|
| 236 |
+
),
|
| 237 |
+
FileExtension(
|
| 238 |
+
name="OpenEXR",
|
| 239 |
+
extension=".exr",
|
| 240 |
+
external_link="https://openexr.readthedocs.io/en/latest/",
|
| 241 |
+
priority=["EXR-FI", "pyav", "opencv"],
|
| 242 |
+
),
|
| 243 |
+
FileExtension(
|
| 244 |
+
extension=".fff",
|
| 245 |
+
priority=["RAW-FI"],
|
| 246 |
+
),
|
| 247 |
+
FileExtension(
|
| 248 |
+
name="Flexible Image Transport System File",
|
| 249 |
+
extension=".fit",
|
| 250 |
+
priority=["pillow", "FITS-PIL", "FITS"],
|
| 251 |
+
),
|
| 252 |
+
FileExtension(
|
| 253 |
+
name="Flexible Image Transport System File",
|
| 254 |
+
extension=".fits",
|
| 255 |
+
priority=["pillow", "FITS-PIL", "FITS", "pyav"],
|
| 256 |
+
),
|
| 257 |
+
FileExtension(
|
| 258 |
+
name="Autodesk FLC Animation",
|
| 259 |
+
extension=".flc",
|
| 260 |
+
priority=["pillow", "FLI-PIL"],
|
| 261 |
+
),
|
| 262 |
+
FileExtension(
|
| 263 |
+
name="Autodesk FLI Animation",
|
| 264 |
+
extension=".fli",
|
| 265 |
+
priority=["pillow", "FLI-PIL"],
|
| 266 |
+
),
|
| 267 |
+
FileExtension(
|
| 268 |
+
name="Kodak FlashPix",
|
| 269 |
+
extension=".fpx",
|
| 270 |
+
priority=["pillow", "FPX-PIL"],
|
| 271 |
+
),
|
| 272 |
+
FileExtension(
|
| 273 |
+
name="Independence War 2: Edge Of Chaos Texture Format",
|
| 274 |
+
extension=".ftc",
|
| 275 |
+
priority=["pillow", "FTEX-PIL"],
|
| 276 |
+
),
|
| 277 |
+
FileExtension(
|
| 278 |
+
name="Flexible Image Transport System File",
|
| 279 |
+
extension=".fts",
|
| 280 |
+
priority=["FITS"],
|
| 281 |
+
),
|
| 282 |
+
FileExtension(
|
| 283 |
+
name="Independence War 2: Edge Of Chaos Texture Format",
|
| 284 |
+
extension=".ftu",
|
| 285 |
+
priority=["pillow", "FTEX-PIL"],
|
| 286 |
+
),
|
| 287 |
+
FileExtension(
|
| 288 |
+
name="Flexible Image Transport System File",
|
| 289 |
+
extension=".fz",
|
| 290 |
+
priority=["FITS"],
|
| 291 |
+
),
|
| 292 |
+
FileExtension(
|
| 293 |
+
name="Raw fax format CCITT G.3",
|
| 294 |
+
extension=".g3",
|
| 295 |
+
priority=["G3-FI"],
|
| 296 |
+
),
|
| 297 |
+
FileExtension(
|
| 298 |
+
name="GIMP brush file",
|
| 299 |
+
extension=".gbr",
|
| 300 |
+
priority=["pillow", "GBR-PIL"],
|
| 301 |
+
),
|
| 302 |
+
FileExtension(
|
| 303 |
+
name="Grassroots DICOM",
|
| 304 |
+
extension=".gdcm",
|
| 305 |
+
priority=["ITK"],
|
| 306 |
+
),
|
| 307 |
+
FileExtension(
|
| 308 |
+
name="Graphics Interchange Format",
|
| 309 |
+
extension=".gif",
|
| 310 |
+
priority=["pillow", "GIF-PIL", "pyav"],
|
| 311 |
+
),
|
| 312 |
+
FileExtension(
|
| 313 |
+
name="UMDS GIPL",
|
| 314 |
+
extension=".gipl",
|
| 315 |
+
priority=["ITK"],
|
| 316 |
+
),
|
| 317 |
+
FileExtension(
|
| 318 |
+
name="gridded meteorological data",
|
| 319 |
+
extension=".grib",
|
| 320 |
+
priority=["pillow", "GRIB-PIL"],
|
| 321 |
+
),
|
| 322 |
+
FileExtension(
|
| 323 |
+
name="Hierarchical Data Format 5",
|
| 324 |
+
extension=".h5",
|
| 325 |
+
priority=["pillow", "HDF5-PIL"],
|
| 326 |
+
),
|
| 327 |
+
FileExtension(
|
| 328 |
+
name="Hierarchical Data Format 5",
|
| 329 |
+
extension=".hdf",
|
| 330 |
+
priority=["pillow", "HDF5-PIL"],
|
| 331 |
+
),
|
| 332 |
+
FileExtension(
|
| 333 |
+
name="Hierarchical Data Format 5",
|
| 334 |
+
extension=".hdf5",
|
| 335 |
+
priority=["ITK"],
|
| 336 |
+
),
|
| 337 |
+
FileExtension(
|
| 338 |
+
name="JPEG Extended Range",
|
| 339 |
+
extension=".hdp",
|
| 340 |
+
priority=["JPEG-XR-FI"],
|
| 341 |
+
),
|
| 342 |
+
FileExtension(
|
| 343 |
+
name="High Dynamic Range Image",
|
| 344 |
+
extension=".hdr",
|
| 345 |
+
priority=["HDR-FI", "ITK", "opencv"],
|
| 346 |
+
),
|
| 347 |
+
FileExtension(
|
| 348 |
+
extension=".ia",
|
| 349 |
+
priority=["RAW-FI"],
|
| 350 |
+
),
|
| 351 |
+
FileExtension(
|
| 352 |
+
extension=".icb",
|
| 353 |
+
priority=["pillow"],
|
| 354 |
+
),
|
| 355 |
+
FileExtension(
|
| 356 |
+
name="Mac OS Icon File",
|
| 357 |
+
extension=".icns",
|
| 358 |
+
priority=["pillow", "ICNS-PIL"],
|
| 359 |
+
),
|
| 360 |
+
FileExtension(
|
| 361 |
+
name="Windows Icon File",
|
| 362 |
+
extension=".ico",
|
| 363 |
+
priority=["pillow", "ICO-FI", "ICO-PIL", "pyav"],
|
| 364 |
+
),
|
| 365 |
+
FileExtension(
|
| 366 |
+
name="ILBM Interleaved Bitmap",
|
| 367 |
+
extension=".iff",
|
| 368 |
+
priority=["IFF-FI"],
|
| 369 |
+
),
|
| 370 |
+
FileExtension(
|
| 371 |
+
name="IPTC/NAA",
|
| 372 |
+
extension=".iim",
|
| 373 |
+
priority=["pillow", "IPTC-PIL"],
|
| 374 |
+
),
|
| 375 |
+
FileExtension(
|
| 376 |
+
extension=".iiq",
|
| 377 |
+
priority=["RAW-FI"],
|
| 378 |
+
),
|
| 379 |
+
FileExtension(
|
| 380 |
+
name="IFUNC Image Memory",
|
| 381 |
+
extension=".im",
|
| 382 |
+
priority=["pillow", "IM-PIL"],
|
| 383 |
+
),
|
| 384 |
+
FileExtension(
|
| 385 |
+
extension=".img",
|
| 386 |
+
priority=["ITK", "GDAL"],
|
| 387 |
+
),
|
| 388 |
+
FileExtension(
|
| 389 |
+
extension=".img.gz",
|
| 390 |
+
priority=["ITK"],
|
| 391 |
+
),
|
| 392 |
+
FileExtension(
|
| 393 |
+
name="IM Tools",
|
| 394 |
+
extension=".IMT",
|
| 395 |
+
priority=["pillow", "IMT-PIL"],
|
| 396 |
+
),
|
| 397 |
+
FileExtension(
|
| 398 |
+
name="Image Processing Lab",
|
| 399 |
+
extension=".ipl",
|
| 400 |
+
priority=["ITK"],
|
| 401 |
+
),
|
| 402 |
+
FileExtension(
|
| 403 |
+
name="JPEG 2000",
|
| 404 |
+
extension=".j2c",
|
| 405 |
+
priority=["pillow", "J2K-FI", "JPEG2000-PIL", "pyav"],
|
| 406 |
+
),
|
| 407 |
+
FileExtension(
|
| 408 |
+
name="JPEG 2000",
|
| 409 |
+
extension=".j2k",
|
| 410 |
+
priority=["pillow", "J2K-FI", "JPEG2000-PIL", "pyav"],
|
| 411 |
+
),
|
| 412 |
+
FileExtension(
|
| 413 |
+
name="JPEG",
|
| 414 |
+
extension=".jfif",
|
| 415 |
+
priority=["pillow", "JPEG-PIL"],
|
| 416 |
+
),
|
| 417 |
+
FileExtension(
|
| 418 |
+
name="JPEG",
|
| 419 |
+
extension=".jif",
|
| 420 |
+
priority=["JPEG-FI"],
|
| 421 |
+
),
|
| 422 |
+
FileExtension(
|
| 423 |
+
name="JPEG Network Graphics",
|
| 424 |
+
extension=".jng",
|
| 425 |
+
priority=["JNG-FI"],
|
| 426 |
+
),
|
| 427 |
+
FileExtension(
|
| 428 |
+
name="JPEG 2000",
|
| 429 |
+
extension=".jp2",
|
| 430 |
+
priority=["pillow", "JP2-FI", "JPEG2000-PIL", "pyav", "opencv"],
|
| 431 |
+
),
|
| 432 |
+
FileExtension(
|
| 433 |
+
name="JPEG 2000",
|
| 434 |
+
extension=".jpc",
|
| 435 |
+
priority=["pillow", "JPEG2000-PIL"],
|
| 436 |
+
),
|
| 437 |
+
FileExtension(
|
| 438 |
+
name="JPEG",
|
| 439 |
+
extension=".jpe",
|
| 440 |
+
priority=["pillow", "JPEG-FI", "JPEG-PIL", "opencv"],
|
| 441 |
+
),
|
| 442 |
+
FileExtension(
|
| 443 |
+
name="Joint Photographic Experts Group",
|
| 444 |
+
extension=".jpeg",
|
| 445 |
+
priority=["pillow", "JPEG-PIL", "JPEG-FI", "ITK", "GDAL", "pyav", "opencv"],
|
| 446 |
+
),
|
| 447 |
+
FileExtension(
|
| 448 |
+
name="JPEG 2000",
|
| 449 |
+
extension=".jpf",
|
| 450 |
+
priority=["pillow", "JPEG2000-PIL"],
|
| 451 |
+
),
|
| 452 |
+
FileExtension(
|
| 453 |
+
name="Joint Photographic Experts Group",
|
| 454 |
+
extension=".jpg",
|
| 455 |
+
priority=["pillow", "JPEG-PIL", "JPEG-FI", "ITK", "GDAL", "pyav", "opencv"],
|
| 456 |
+
),
|
| 457 |
+
FileExtension(
|
| 458 |
+
name="JPEG 2000",
|
| 459 |
+
extension=".jpx",
|
| 460 |
+
priority=["pillow", "JPEG2000-PIL"],
|
| 461 |
+
),
|
| 462 |
+
FileExtension(
|
| 463 |
+
name="JPEG Extended Range",
|
| 464 |
+
extension=".jxr",
|
| 465 |
+
priority=["JPEG-XR-FI"],
|
| 466 |
+
),
|
| 467 |
+
FileExtension(
|
| 468 |
+
extension=".k25",
|
| 469 |
+
priority=["RAW-FI"],
|
| 470 |
+
),
|
| 471 |
+
FileExtension(
|
| 472 |
+
extension=".kc2",
|
| 473 |
+
priority=["RAW-FI"],
|
| 474 |
+
),
|
| 475 |
+
FileExtension(
|
| 476 |
+
extension=".kdc",
|
| 477 |
+
priority=["RAW-FI"],
|
| 478 |
+
),
|
| 479 |
+
FileExtension(
|
| 480 |
+
name="C64 Koala Graphics",
|
| 481 |
+
extension=".koa",
|
| 482 |
+
priority=["KOALA-FI"],
|
| 483 |
+
),
|
| 484 |
+
FileExtension(
|
| 485 |
+
name="ILBM Interleaved Bitmap",
|
| 486 |
+
extension=".lbm",
|
| 487 |
+
priority=["IFF-FI"],
|
| 488 |
+
),
|
| 489 |
+
FileExtension(
|
| 490 |
+
name="Lytro F01",
|
| 491 |
+
extension=".lfp",
|
| 492 |
+
priority=["LYTRO-LFP"],
|
| 493 |
+
),
|
| 494 |
+
FileExtension(
|
| 495 |
+
name="Lytro Illum",
|
| 496 |
+
extension=".lfr",
|
| 497 |
+
priority=["LYTRO-LFR"],
|
| 498 |
+
),
|
| 499 |
+
FileExtension(
|
| 500 |
+
name="ZEISS LSM",
|
| 501 |
+
extension=".lsm",
|
| 502 |
+
priority=["tifffile", "ITK", "TIFF"],
|
| 503 |
+
),
|
| 504 |
+
FileExtension(
|
| 505 |
+
name="McIdas area file",
|
| 506 |
+
extension=".MCIDAS",
|
| 507 |
+
priority=["pillow", "MCIDAS-PIL"],
|
| 508 |
+
external_link="https://www.ssec.wisc.edu/mcidas/doc/prog_man/2003print/progman2003-formats.html",
|
| 509 |
+
),
|
| 510 |
+
FileExtension(
|
| 511 |
+
extension=".mdc",
|
| 512 |
+
priority=["RAW-FI"],
|
| 513 |
+
),
|
| 514 |
+
FileExtension(
|
| 515 |
+
extension=".mef",
|
| 516 |
+
priority=["RAW-FI"],
|
| 517 |
+
),
|
| 518 |
+
FileExtension(
|
| 519 |
+
name="FreeSurfer File Format",
|
| 520 |
+
extension=".mgh",
|
| 521 |
+
priority=["ITK"],
|
| 522 |
+
),
|
| 523 |
+
FileExtension(
|
| 524 |
+
name="ITK MetaImage",
|
| 525 |
+
extension=".mha",
|
| 526 |
+
priority=["ITK"],
|
| 527 |
+
),
|
| 528 |
+
FileExtension(
|
| 529 |
+
name="ITK MetaImage Header",
|
| 530 |
+
extension=".mhd",
|
| 531 |
+
priority=["ITK"],
|
| 532 |
+
),
|
| 533 |
+
FileExtension(
|
| 534 |
+
name="Microsoft Image Composer",
|
| 535 |
+
extension=".mic",
|
| 536 |
+
priority=["pillow", "MIC-PIL"],
|
| 537 |
+
),
|
| 538 |
+
FileExtension(
|
| 539 |
+
name="Matroska Multimedia Container",
|
| 540 |
+
extension=".mkv",
|
| 541 |
+
priority=["FFMPEG", "pyav"],
|
| 542 |
+
),
|
| 543 |
+
FileExtension(
|
| 544 |
+
name="Medical Imaging NetCDF",
|
| 545 |
+
extension=".mnc",
|
| 546 |
+
priority=["ITK"],
|
| 547 |
+
),
|
| 548 |
+
FileExtension(
|
| 549 |
+
name="Medical Imaging NetCDF 2",
|
| 550 |
+
extension=".mnc2",
|
| 551 |
+
priority=["ITK"],
|
| 552 |
+
),
|
| 553 |
+
FileExtension(
|
| 554 |
+
name="Leaf Raw Image Format",
|
| 555 |
+
extension=".mos",
|
| 556 |
+
priority=["RAW-FI"],
|
| 557 |
+
),
|
| 558 |
+
FileExtension(
|
| 559 |
+
name="QuickTime File Format",
|
| 560 |
+
extension=".mov",
|
| 561 |
+
priority=["FFMPEG", "pyav"],
|
| 562 |
+
),
|
| 563 |
+
FileExtension(
|
| 564 |
+
name="MPEG-4 Part 14",
|
| 565 |
+
extension=".mp4",
|
| 566 |
+
priority=["FFMPEG", "pyav"],
|
| 567 |
+
),
|
| 568 |
+
FileExtension(
|
| 569 |
+
name="MPEG-1 Moving Picture Experts Group",
|
| 570 |
+
extension=".mpeg",
|
| 571 |
+
priority=["FFMPEG", "pyav"],
|
| 572 |
+
),
|
| 573 |
+
FileExtension(
|
| 574 |
+
name="Moving Picture Experts Group",
|
| 575 |
+
extension=".mpg",
|
| 576 |
+
priority=["pillow", "FFMPEG", "pyav"],
|
| 577 |
+
),
|
| 578 |
+
FileExtension(
|
| 579 |
+
name="JPEG Multi-Picture Format",
|
| 580 |
+
extension=".mpo",
|
| 581 |
+
priority=["pillow", "MPO-PIL"],
|
| 582 |
+
),
|
| 583 |
+
FileExtension(
|
| 584 |
+
name="Magnetic resonance imaging",
|
| 585 |
+
extension=".mri",
|
| 586 |
+
priority=["DICOM"],
|
| 587 |
+
),
|
| 588 |
+
FileExtension(
|
| 589 |
+
extension=".mrw",
|
| 590 |
+
priority=["RAW-FI"],
|
| 591 |
+
),
|
| 592 |
+
FileExtension(
|
| 593 |
+
name="Windows Paint",
|
| 594 |
+
extension=".msp",
|
| 595 |
+
priority=["pillow", "MSP-PIL"],
|
| 596 |
+
),
|
| 597 |
+
FileExtension(
|
| 598 |
+
extension=".nef",
|
| 599 |
+
priority=["RAW-FI", "rawpy"],
|
| 600 |
+
),
|
| 601 |
+
FileExtension(
|
| 602 |
+
extension=".nhdr",
|
| 603 |
+
priority=["ITK"],
|
| 604 |
+
),
|
| 605 |
+
FileExtension(
|
| 606 |
+
extension=".nia",
|
| 607 |
+
priority=["ITK"],
|
| 608 |
+
),
|
| 609 |
+
FileExtension(
|
| 610 |
+
extension=".nii",
|
| 611 |
+
priority=["ITK"],
|
| 612 |
+
),
|
| 613 |
+
FileExtension(
|
| 614 |
+
name="nii.gz",
|
| 615 |
+
extension=".nii.gz",
|
| 616 |
+
priority=["ITK"],
|
| 617 |
+
),
|
| 618 |
+
FileExtension(
|
| 619 |
+
name="Numpy Array",
|
| 620 |
+
extension=".npz",
|
| 621 |
+
priority=["NPZ"],
|
| 622 |
+
volume_support=True,
|
| 623 |
+
),
|
| 624 |
+
FileExtension(
|
| 625 |
+
extension=".nrrd",
|
| 626 |
+
priority=["ITK"],
|
| 627 |
+
),
|
| 628 |
+
FileExtension(
|
| 629 |
+
extension=".nrw",
|
| 630 |
+
priority=["RAW-FI"],
|
| 631 |
+
),
|
| 632 |
+
FileExtension(
|
| 633 |
+
extension=".orf",
|
| 634 |
+
priority=["RAW-FI"],
|
| 635 |
+
),
|
| 636 |
+
FileExtension(
|
| 637 |
+
extension=".palm",
|
| 638 |
+
priority=["pillow"],
|
| 639 |
+
),
|
| 640 |
+
FileExtension(
|
| 641 |
+
name="Portable Bitmap",
|
| 642 |
+
extension=".pbm",
|
| 643 |
+
priority=["PGM-FI", "PGMRAW-FI", "pyav", "opencv"],
|
| 644 |
+
),
|
| 645 |
+
FileExtension(
|
| 646 |
+
name="Kodak PhotoCD",
|
| 647 |
+
extension=".pcd",
|
| 648 |
+
priority=["pillow", "PCD-FI", "PCD-PIL"],
|
| 649 |
+
),
|
| 650 |
+
FileExtension(
|
| 651 |
+
name="Macintosh PICT",
|
| 652 |
+
extension=".pct",
|
| 653 |
+
priority=["PICT-FI"],
|
| 654 |
+
),
|
| 655 |
+
FileExtension(
|
| 656 |
+
name="Zsoft Paintbrush",
|
| 657 |
+
extension=".PCX",
|
| 658 |
+
priority=["pillow", "PCX-FI", "PCX-PIL"],
|
| 659 |
+
),
|
| 660 |
+
FileExtension(
|
| 661 |
+
extension=".pdf",
|
| 662 |
+
priority=["pillow"],
|
| 663 |
+
),
|
| 664 |
+
FileExtension(
|
| 665 |
+
extension=".pef",
|
| 666 |
+
priority=["RAW-FI"],
|
| 667 |
+
),
|
| 668 |
+
FileExtension(
|
| 669 |
+
extension=".pfm",
|
| 670 |
+
priority=["PFM-FI", "pyav", "opencv"],
|
| 671 |
+
),
|
| 672 |
+
FileExtension(
|
| 673 |
+
name="Portable Greymap",
|
| 674 |
+
extension=".pgm",
|
| 675 |
+
priority=["pillow", "PGM-FI", "PGMRAW-FI", "pyav", "opencv"],
|
| 676 |
+
),
|
| 677 |
+
FileExtension(
|
| 678 |
+
name="Macintosh PICT",
|
| 679 |
+
extension=".pic",
|
| 680 |
+
priority=["PICT-FI", "ITK", "opencv"],
|
| 681 |
+
),
|
| 682 |
+
FileExtension(
|
| 683 |
+
name="Macintosh PICT",
|
| 684 |
+
extension=".pict",
|
| 685 |
+
priority=["PICT-FI"],
|
| 686 |
+
),
|
| 687 |
+
FileExtension(
|
| 688 |
+
name="Portable Network Graphics",
|
| 689 |
+
extension=".png",
|
| 690 |
+
priority=["pillow", "PNG-PIL", "PNG-FI", "ITK", "pyav", "opencv"],
|
| 691 |
+
),
|
| 692 |
+
FileExtension(
|
| 693 |
+
name="Portable Image Format",
|
| 694 |
+
extension=".pnm",
|
| 695 |
+
priority=["pillow", "opencv"],
|
| 696 |
+
),
|
| 697 |
+
FileExtension(
|
| 698 |
+
name="Pbmplus image",
|
| 699 |
+
extension=".ppm",
|
| 700 |
+
priority=["pillow", "PPM-PIL", "pyav"],
|
| 701 |
+
),
|
| 702 |
+
FileExtension(
|
| 703 |
+
name="Pbmplus image",
|
| 704 |
+
extension=".pbm",
|
| 705 |
+
priority=["pillow", "PPM-PIL", "PPM-FI"],
|
| 706 |
+
),
|
| 707 |
+
FileExtension(
|
| 708 |
+
name="Portable image format",
|
| 709 |
+
extension=".pxm",
|
| 710 |
+
priority=["opencv"],
|
| 711 |
+
),
|
| 712 |
+
FileExtension(
|
| 713 |
+
name="Portable Pixelmap (ASCII)",
|
| 714 |
+
extension=".ppm",
|
| 715 |
+
priority=["PPM-FI", "opencv"],
|
| 716 |
+
),
|
| 717 |
+
FileExtension(
|
| 718 |
+
name="Portable Pixelmap (Raw)",
|
| 719 |
+
extension=".ppm",
|
| 720 |
+
priority=["PPMRAW-FI"],
|
| 721 |
+
),
|
| 722 |
+
FileExtension(
|
| 723 |
+
name="Ghostscript",
|
| 724 |
+
extension=".ps",
|
| 725 |
+
priority=["pillow", "EPS-PIL"],
|
| 726 |
+
),
|
| 727 |
+
FileExtension(
|
| 728 |
+
name="Adope Photoshop 2.5 and 3.0",
|
| 729 |
+
extension=".psd",
|
| 730 |
+
priority=["pillow", "PSD-PIL", "PSD-FI"],
|
| 731 |
+
),
|
| 732 |
+
FileExtension(
|
| 733 |
+
extension=".ptx",
|
| 734 |
+
priority=["RAW-FI"],
|
| 735 |
+
),
|
| 736 |
+
FileExtension(
|
| 737 |
+
extension=".pxn",
|
| 738 |
+
priority=["RAW-FI"],
|
| 739 |
+
),
|
| 740 |
+
FileExtension(
|
| 741 |
+
name="PIXAR raster image",
|
| 742 |
+
extension=".pxr",
|
| 743 |
+
priority=["pillow", "PIXAR-PIL"],
|
| 744 |
+
),
|
| 745 |
+
FileExtension(
|
| 746 |
+
extension=".qtk",
|
| 747 |
+
priority=["RAW-FI"],
|
| 748 |
+
),
|
| 749 |
+
FileExtension(
|
| 750 |
+
extension=".raf",
|
| 751 |
+
priority=["RAW-FI"],
|
| 752 |
+
),
|
| 753 |
+
FileExtension(
|
| 754 |
+
name="Sun Raster File",
|
| 755 |
+
extension=".ras",
|
| 756 |
+
priority=["pillow", "SUN-PIL", "RAS-FI", "pyav", "opencv"],
|
| 757 |
+
),
|
| 758 |
+
FileExtension(
|
| 759 |
+
name="Sun Raster File",
|
| 760 |
+
extension=".sr",
|
| 761 |
+
priority=["opencv"],
|
| 762 |
+
),
|
| 763 |
+
FileExtension(
|
| 764 |
+
extension=".raw",
|
| 765 |
+
priority=["RAW-FI", "LYTRO-ILLUM-RAW", "LYTRO-F01-RAW", "rawpy"],
|
| 766 |
+
),
|
| 767 |
+
FileExtension(
|
| 768 |
+
extension=".rdc",
|
| 769 |
+
priority=["RAW-FI"],
|
| 770 |
+
),
|
| 771 |
+
FileExtension(
|
| 772 |
+
name="Silicon Graphics Image",
|
| 773 |
+
extension=".rgb",
|
| 774 |
+
priority=["pillow", "SGI-PIL"],
|
| 775 |
+
),
|
| 776 |
+
FileExtension(
|
| 777 |
+
name="Silicon Graphics Image",
|
| 778 |
+
extension=".rgba",
|
| 779 |
+
priority=["pillow", "SGI-PIL"],
|
| 780 |
+
),
|
| 781 |
+
FileExtension(
|
| 782 |
+
extension=".rw2",
|
| 783 |
+
priority=["RAW-FI"],
|
| 784 |
+
),
|
| 785 |
+
FileExtension(
|
| 786 |
+
extension=".rwl",
|
| 787 |
+
priority=["RAW-FI"],
|
| 788 |
+
),
|
| 789 |
+
FileExtension(
|
| 790 |
+
extension=".rwz",
|
| 791 |
+
priority=["RAW-FI"],
|
| 792 |
+
),
|
| 793 |
+
FileExtension(
|
| 794 |
+
name="Silicon Graphics Image",
|
| 795 |
+
extension=".sgi",
|
| 796 |
+
priority=["pillow", "SGI-PIL", "pyav"],
|
| 797 |
+
),
|
| 798 |
+
FileExtension(
|
| 799 |
+
name="SPE File Format",
|
| 800 |
+
extension=".spe",
|
| 801 |
+
priority=["SPE"],
|
| 802 |
+
),
|
| 803 |
+
FileExtension(
|
| 804 |
+
extension=".SPIDER",
|
| 805 |
+
priority=["pillow", "SPIDER-PIL"],
|
| 806 |
+
),
|
| 807 |
+
FileExtension(
|
| 808 |
+
extension=".sr2",
|
| 809 |
+
priority=["RAW-FI"],
|
| 810 |
+
),
|
| 811 |
+
FileExtension(
|
| 812 |
+
extension=".srf",
|
| 813 |
+
priority=["RAW-FI"],
|
| 814 |
+
),
|
| 815 |
+
FileExtension(
|
| 816 |
+
extension=".srw",
|
| 817 |
+
priority=["RAW-FI"],
|
| 818 |
+
),
|
| 819 |
+
FileExtension(
|
| 820 |
+
extension=".sti",
|
| 821 |
+
priority=["RAW-FI"],
|
| 822 |
+
),
|
| 823 |
+
FileExtension(
|
| 824 |
+
extension=".stk",
|
| 825 |
+
priority=["tifffile", "TIFF"],
|
| 826 |
+
),
|
| 827 |
+
FileExtension(
|
| 828 |
+
name="ShockWave Flash",
|
| 829 |
+
extension=".swf",
|
| 830 |
+
priority=["SWF", "pyav"],
|
| 831 |
+
),
|
| 832 |
+
FileExtension(
|
| 833 |
+
name="Truevision TGA",
|
| 834 |
+
extension=".targa",
|
| 835 |
+
priority=["pillow", "TARGA-FI"],
|
| 836 |
+
),
|
| 837 |
+
FileExtension(
|
| 838 |
+
name="Truevision TGA",
|
| 839 |
+
extension=".tga",
|
| 840 |
+
priority=["pillow", "TGA-PIL", "TARGA-FI", "pyav"],
|
| 841 |
+
),
|
| 842 |
+
FileExtension(
|
| 843 |
+
name="Tagged Image File",
|
| 844 |
+
extension=".tif",
|
| 845 |
+
priority=[
|
| 846 |
+
"tifffile",
|
| 847 |
+
"TIFF",
|
| 848 |
+
"pillow",
|
| 849 |
+
"TIFF-PIL",
|
| 850 |
+
"TIFF-FI",
|
| 851 |
+
"FEI",
|
| 852 |
+
"ITK",
|
| 853 |
+
"GDAL",
|
| 854 |
+
"pyav",
|
| 855 |
+
"opencv",
|
| 856 |
+
],
|
| 857 |
+
volume_support=True,
|
| 858 |
+
),
|
| 859 |
+
FileExtension(
|
| 860 |
+
name="Tagged Image File Format",
|
| 861 |
+
extension=".tiff",
|
| 862 |
+
priority=[
|
| 863 |
+
"tifffile",
|
| 864 |
+
"TIFF",
|
| 865 |
+
"pillow",
|
| 866 |
+
"TIFF-PIL",
|
| 867 |
+
"TIFF-FI",
|
| 868 |
+
"FEI",
|
| 869 |
+
"ITK",
|
| 870 |
+
"GDAL",
|
| 871 |
+
"pyav",
|
| 872 |
+
"opencv",
|
| 873 |
+
],
|
| 874 |
+
volume_support=True,
|
| 875 |
+
),
|
| 876 |
+
FileExtension(
|
| 877 |
+
extension=".vda",
|
| 878 |
+
priority=["pillow"],
|
| 879 |
+
),
|
| 880 |
+
FileExtension(
|
| 881 |
+
extension=".vst",
|
| 882 |
+
priority=["pillow"],
|
| 883 |
+
),
|
| 884 |
+
FileExtension(
|
| 885 |
+
extension=".vtk",
|
| 886 |
+
priority=["ITK"],
|
| 887 |
+
),
|
| 888 |
+
FileExtension(
|
| 889 |
+
name="Wireless Bitmap",
|
| 890 |
+
extension=".wap",
|
| 891 |
+
priority=["WBMP-FI"],
|
| 892 |
+
),
|
| 893 |
+
FileExtension(
|
| 894 |
+
name="Wireless Bitmap",
|
| 895 |
+
extension=".wbm",
|
| 896 |
+
priority=["WBMP-FI"],
|
| 897 |
+
),
|
| 898 |
+
FileExtension(
|
| 899 |
+
name="Wireless Bitmap",
|
| 900 |
+
extension=".wbmp",
|
| 901 |
+
priority=["WBMP-FI"],
|
| 902 |
+
),
|
| 903 |
+
FileExtension(
|
| 904 |
+
name="JPEG Extended Range",
|
| 905 |
+
extension=".wdp",
|
| 906 |
+
priority=["JPEG-XR-FI"],
|
| 907 |
+
),
|
| 908 |
+
FileExtension(
|
| 909 |
+
name="Matroska",
|
| 910 |
+
extension=".webm",
|
| 911 |
+
priority=["FFMPEG", "pyav"],
|
| 912 |
+
),
|
| 913 |
+
FileExtension(
|
| 914 |
+
name="Google WebP",
|
| 915 |
+
extension=".webp",
|
| 916 |
+
priority=["pillow", "WEBP-FI", "pyav", "opencv"],
|
| 917 |
+
),
|
| 918 |
+
FileExtension(
|
| 919 |
+
name="Windows Meta File",
|
| 920 |
+
extension=".wmf",
|
| 921 |
+
priority=["pillow", "WMF-PIL"],
|
| 922 |
+
),
|
| 923 |
+
FileExtension(
|
| 924 |
+
name="Windows Media Video",
|
| 925 |
+
extension=".wmv",
|
| 926 |
+
priority=["FFMPEG"],
|
| 927 |
+
),
|
| 928 |
+
FileExtension(
|
| 929 |
+
name="X11 Bitmap",
|
| 930 |
+
extension=".xbm",
|
| 931 |
+
priority=["pillow", "XBM-PIL", "XBM-FI", "pyav"],
|
| 932 |
+
),
|
| 933 |
+
FileExtension(
|
| 934 |
+
name="X11 Pixel Map",
|
| 935 |
+
extension=".xpm",
|
| 936 |
+
priority=["pillow", "XPM-PIL", "XPM-FI"],
|
| 937 |
+
),
|
| 938 |
+
FileExtension(
|
| 939 |
+
name="Thumbnail Image",
|
| 940 |
+
extension=".XVTHUMB",
|
| 941 |
+
priority=["pillow", "XVTHUMB-PIL"],
|
| 942 |
+
),
|
| 943 |
+
FileExtension(
|
| 944 |
+
extension=".dpx",
|
| 945 |
+
priority=["pyav"],
|
| 946 |
+
),
|
| 947 |
+
FileExtension(
|
| 948 |
+
extension=".im1",
|
| 949 |
+
priority=["pyav"],
|
| 950 |
+
),
|
| 951 |
+
FileExtension(
|
| 952 |
+
extension=".im24",
|
| 953 |
+
priority=["pyav"],
|
| 954 |
+
),
|
| 955 |
+
FileExtension(
|
| 956 |
+
extension=".im8",
|
| 957 |
+
priority=["pyav"],
|
| 958 |
+
),
|
| 959 |
+
FileExtension(
|
| 960 |
+
extension=".jls",
|
| 961 |
+
priority=["pyav"],
|
| 962 |
+
),
|
| 963 |
+
FileExtension(
|
| 964 |
+
extension=".ljpg",
|
| 965 |
+
priority=["pyav"],
|
| 966 |
+
),
|
| 967 |
+
FileExtension(
|
| 968 |
+
extension=".pam",
|
| 969 |
+
priority=["pyav"],
|
| 970 |
+
),
|
| 971 |
+
FileExtension(
|
| 972 |
+
extension=".pcx",
|
| 973 |
+
priority=["pyav"],
|
| 974 |
+
),
|
| 975 |
+
FileExtension(
|
| 976 |
+
extension=".pgmyuv",
|
| 977 |
+
priority=["pyav"],
|
| 978 |
+
),
|
| 979 |
+
FileExtension(
|
| 980 |
+
extension=".pix",
|
| 981 |
+
priority=["pyav"],
|
| 982 |
+
),
|
| 983 |
+
FileExtension(
|
| 984 |
+
extension=".ppm",
|
| 985 |
+
priority=["pyav"],
|
| 986 |
+
),
|
| 987 |
+
FileExtension(
|
| 988 |
+
extension=".rs",
|
| 989 |
+
priority=["pyav"],
|
| 990 |
+
),
|
| 991 |
+
FileExtension(
|
| 992 |
+
extension=".sun",
|
| 993 |
+
priority=["pyav"],
|
| 994 |
+
),
|
| 995 |
+
FileExtension(
|
| 996 |
+
extension=".sunras",
|
| 997 |
+
priority=["pyav"],
|
| 998 |
+
),
|
| 999 |
+
FileExtension(
|
| 1000 |
+
extension=".xface",
|
| 1001 |
+
priority=["pyav"],
|
| 1002 |
+
),
|
| 1003 |
+
FileExtension(
|
| 1004 |
+
extension=".xwd",
|
| 1005 |
+
priority=["pyav"],
|
| 1006 |
+
),
|
| 1007 |
+
FileExtension(
|
| 1008 |
+
extension=".y",
|
| 1009 |
+
priority=["pyav"],
|
| 1010 |
+
),
|
| 1011 |
+
FileExtension(
|
| 1012 |
+
name="3GP (3GPP file format)",
|
| 1013 |
+
extension=".3g2",
|
| 1014 |
+
priority=["pyav"],
|
| 1015 |
+
),
|
| 1016 |
+
FileExtension(
|
| 1017 |
+
name="3GP (3GPP file format)",
|
| 1018 |
+
extension=".3gp",
|
| 1019 |
+
priority=["pyav"],
|
| 1020 |
+
),
|
| 1021 |
+
FileExtension(
|
| 1022 |
+
name="3GP (3GPP file format)",
|
| 1023 |
+
extension=".f4v",
|
| 1024 |
+
priority=["pyav"],
|
| 1025 |
+
),
|
| 1026 |
+
FileExtension(
|
| 1027 |
+
name="3GP (3GPP file format)",
|
| 1028 |
+
extension=".ism",
|
| 1029 |
+
priority=["pyav"],
|
| 1030 |
+
),
|
| 1031 |
+
FileExtension(
|
| 1032 |
+
name="3GP (3GPP file format)",
|
| 1033 |
+
extension=".isma",
|
| 1034 |
+
priority=["pyav"],
|
| 1035 |
+
),
|
| 1036 |
+
FileExtension(
|
| 1037 |
+
name="3GP (3GPP file format)",
|
| 1038 |
+
extension=".ismv",
|
| 1039 |
+
priority=["pyav"],
|
| 1040 |
+
),
|
| 1041 |
+
FileExtension(
|
| 1042 |
+
name="3GP (3GPP file format)",
|
| 1043 |
+
extension=".m4a",
|
| 1044 |
+
priority=["pyav"],
|
| 1045 |
+
),
|
| 1046 |
+
FileExtension(
|
| 1047 |
+
name="3GP (3GPP file format)",
|
| 1048 |
+
extension=".m4b",
|
| 1049 |
+
priority=["pyav"],
|
| 1050 |
+
),
|
| 1051 |
+
FileExtension(
|
| 1052 |
+
name="3GP (3GPP file format)",
|
| 1053 |
+
extension=".mj2",
|
| 1054 |
+
priority=["pyav"],
|
| 1055 |
+
),
|
| 1056 |
+
FileExtension(
|
| 1057 |
+
name="3GP (3GPP file format)",
|
| 1058 |
+
extension=".psp",
|
| 1059 |
+
priority=["pyav"],
|
| 1060 |
+
),
|
| 1061 |
+
FileExtension(
|
| 1062 |
+
name="3GP2 (3GPP2 file format)",
|
| 1063 |
+
extension=".3g2",
|
| 1064 |
+
priority=["pyav"],
|
| 1065 |
+
),
|
| 1066 |
+
FileExtension(
|
| 1067 |
+
name="3GP2 (3GPP2 file format)",
|
| 1068 |
+
extension=".3gp",
|
| 1069 |
+
priority=["pyav"],
|
| 1070 |
+
),
|
| 1071 |
+
FileExtension(
|
| 1072 |
+
name="3GP2 (3GPP2 file format)",
|
| 1073 |
+
extension=".f4v",
|
| 1074 |
+
priority=["pyav"],
|
| 1075 |
+
),
|
| 1076 |
+
FileExtension(
|
| 1077 |
+
name="3GP2 (3GPP2 file format)",
|
| 1078 |
+
extension=".ism",
|
| 1079 |
+
priority=["pyav"],
|
| 1080 |
+
),
|
| 1081 |
+
FileExtension(
|
| 1082 |
+
name="3GP2 (3GPP2 file format)",
|
| 1083 |
+
extension=".isma",
|
| 1084 |
+
priority=["pyav"],
|
| 1085 |
+
),
|
| 1086 |
+
FileExtension(
|
| 1087 |
+
name="3GP2 (3GPP2 file format)",
|
| 1088 |
+
extension=".ismv",
|
| 1089 |
+
priority=["pyav"],
|
| 1090 |
+
),
|
| 1091 |
+
FileExtension(
|
| 1092 |
+
name="3GP2 (3GPP2 file format)",
|
| 1093 |
+
extension=".m4a",
|
| 1094 |
+
priority=["pyav"],
|
| 1095 |
+
),
|
| 1096 |
+
FileExtension(
|
| 1097 |
+
name="3GP2 (3GPP2 file format)",
|
| 1098 |
+
extension=".m4b",
|
| 1099 |
+
priority=["pyav"],
|
| 1100 |
+
),
|
| 1101 |
+
FileExtension(
|
| 1102 |
+
name="3GP2 (3GPP2 file format)",
|
| 1103 |
+
extension=".mj2",
|
| 1104 |
+
priority=["pyav"],
|
| 1105 |
+
),
|
| 1106 |
+
FileExtension(
|
| 1107 |
+
name="3GP2 (3GPP2 file format)",
|
| 1108 |
+
extension=".psp",
|
| 1109 |
+
priority=["pyav"],
|
| 1110 |
+
),
|
| 1111 |
+
FileExtension(
|
| 1112 |
+
name="3GPP AMR",
|
| 1113 |
+
extension=".amr",
|
| 1114 |
+
priority=["pyav"],
|
| 1115 |
+
),
|
| 1116 |
+
FileExtension(
|
| 1117 |
+
name="a64 - video for Commodore 64",
|
| 1118 |
+
extension=".A64",
|
| 1119 |
+
priority=["pyav"],
|
| 1120 |
+
),
|
| 1121 |
+
FileExtension(
|
| 1122 |
+
name="a64 - video for Commodore 64",
|
| 1123 |
+
extension=".a64",
|
| 1124 |
+
priority=["pyav"],
|
| 1125 |
+
),
|
| 1126 |
+
FileExtension(
|
| 1127 |
+
name="Adobe Filmstrip",
|
| 1128 |
+
extension=".flm",
|
| 1129 |
+
priority=["pyav"],
|
| 1130 |
+
),
|
| 1131 |
+
FileExtension(
|
| 1132 |
+
name="AMV",
|
| 1133 |
+
extension=".amv",
|
| 1134 |
+
priority=["pyav"],
|
| 1135 |
+
),
|
| 1136 |
+
FileExtension(
|
| 1137 |
+
name="ASF (Advanced / Active Streaming Format)",
|
| 1138 |
+
extension=".asf",
|
| 1139 |
+
priority=["pyav"],
|
| 1140 |
+
),
|
| 1141 |
+
FileExtension(
|
| 1142 |
+
name="ASF (Advanced / Active Streaming Format)",
|
| 1143 |
+
extension=".asf",
|
| 1144 |
+
priority=["pyav"],
|
| 1145 |
+
),
|
| 1146 |
+
FileExtension(
|
| 1147 |
+
name="ASF (Advanced / Active Streaming Format)",
|
| 1148 |
+
extension=".wmv",
|
| 1149 |
+
priority=["pyav"],
|
| 1150 |
+
),
|
| 1151 |
+
FileExtension(
|
| 1152 |
+
name="ASF (Advanced / Active Streaming Format)",
|
| 1153 |
+
extension=".wmv",
|
| 1154 |
+
priority=["pyav"],
|
| 1155 |
+
),
|
| 1156 |
+
FileExtension(
|
| 1157 |
+
name="AV1 Annex B",
|
| 1158 |
+
extension=".obu",
|
| 1159 |
+
priority=["pyav"],
|
| 1160 |
+
),
|
| 1161 |
+
FileExtension(
|
| 1162 |
+
name="AV1 low overhead OBU",
|
| 1163 |
+
extension=".obu",
|
| 1164 |
+
priority=["pyav"],
|
| 1165 |
+
),
|
| 1166 |
+
FileExtension(
|
| 1167 |
+
name="AVI (Audio Video Interleaved)",
|
| 1168 |
+
extension=".avi",
|
| 1169 |
+
priority=["pyav"],
|
| 1170 |
+
),
|
| 1171 |
+
FileExtension(
|
| 1172 |
+
name="AVR (Audio Visual Research)",
|
| 1173 |
+
extension=".avr",
|
| 1174 |
+
priority=["pyav"],
|
| 1175 |
+
),
|
| 1176 |
+
FileExtension(
|
| 1177 |
+
name="Beam Software SIFF",
|
| 1178 |
+
extension=".vb",
|
| 1179 |
+
priority=["pyav"],
|
| 1180 |
+
),
|
| 1181 |
+
FileExtension(
|
| 1182 |
+
name="CD Graphics",
|
| 1183 |
+
extension=".cdg",
|
| 1184 |
+
priority=["pyav"],
|
| 1185 |
+
),
|
| 1186 |
+
FileExtension(
|
| 1187 |
+
name="Commodore CDXL video",
|
| 1188 |
+
extension=".cdxl",
|
| 1189 |
+
priority=["pyav"],
|
| 1190 |
+
),
|
| 1191 |
+
FileExtension(
|
| 1192 |
+
name="Commodore CDXL video",
|
| 1193 |
+
extension=".xl",
|
| 1194 |
+
priority=["pyav"],
|
| 1195 |
+
),
|
| 1196 |
+
FileExtension(
|
| 1197 |
+
name="DASH Muxer",
|
| 1198 |
+
extension=".mpd",
|
| 1199 |
+
priority=["pyav"],
|
| 1200 |
+
),
|
| 1201 |
+
FileExtension(
|
| 1202 |
+
name="Digital Pictures SGA",
|
| 1203 |
+
extension=".sga",
|
| 1204 |
+
priority=["pyav"],
|
| 1205 |
+
),
|
| 1206 |
+
FileExtension(
|
| 1207 |
+
name="Discworld II BMV",
|
| 1208 |
+
extension=".bmv",
|
| 1209 |
+
priority=["pyav"],
|
| 1210 |
+
),
|
| 1211 |
+
FileExtension(
|
| 1212 |
+
name="DV (Digital Video)",
|
| 1213 |
+
extension=".dif",
|
| 1214 |
+
priority=["pyav"],
|
| 1215 |
+
),
|
| 1216 |
+
FileExtension(
|
| 1217 |
+
name="DV (Digital Video)",
|
| 1218 |
+
extension=".dv",
|
| 1219 |
+
priority=["pyav"],
|
| 1220 |
+
),
|
| 1221 |
+
FileExtension(
|
| 1222 |
+
name="F4V Adobe Flash Video",
|
| 1223 |
+
extension=".f4v",
|
| 1224 |
+
priority=["pyav"],
|
| 1225 |
+
),
|
| 1226 |
+
FileExtension(
|
| 1227 |
+
name="FLV (Flash Video)",
|
| 1228 |
+
extension=".flv",
|
| 1229 |
+
priority=["pyav"],
|
| 1230 |
+
),
|
| 1231 |
+
FileExtension(
|
| 1232 |
+
name="GXF (General eXchange Format)",
|
| 1233 |
+
extension=".gxf",
|
| 1234 |
+
priority=["pyav"],
|
| 1235 |
+
),
|
| 1236 |
+
FileExtension(
|
| 1237 |
+
name="iCE Draw File",
|
| 1238 |
+
extension=".idf",
|
| 1239 |
+
priority=["pyav"],
|
| 1240 |
+
),
|
| 1241 |
+
FileExtension(
|
| 1242 |
+
name="IFV CCTV DVR",
|
| 1243 |
+
extension=".ifv",
|
| 1244 |
+
priority=["pyav"],
|
| 1245 |
+
),
|
| 1246 |
+
FileExtension(
|
| 1247 |
+
name="iPod H.264 MP4 (MPEG-4 Part 14)",
|
| 1248 |
+
extension=".m4a",
|
| 1249 |
+
priority=["pyav"],
|
| 1250 |
+
),
|
| 1251 |
+
FileExtension(
|
| 1252 |
+
name="iPod H.264 MP4 (MPEG-4 Part 14)",
|
| 1253 |
+
extension=".m4b",
|
| 1254 |
+
priority=["pyav"],
|
| 1255 |
+
),
|
| 1256 |
+
FileExtension(
|
| 1257 |
+
name="iPod H.264 MP4 (MPEG-4 Part 14)",
|
| 1258 |
+
extension=".m4v",
|
| 1259 |
+
priority=["pyav"],
|
| 1260 |
+
),
|
| 1261 |
+
FileExtension(
|
| 1262 |
+
name="IVR (Internet Video Recording)",
|
| 1263 |
+
extension=".ivr",
|
| 1264 |
+
priority=["pyav"],
|
| 1265 |
+
),
|
| 1266 |
+
FileExtension(
|
| 1267 |
+
name="Konami PS2 SVAG",
|
| 1268 |
+
extension=".svag",
|
| 1269 |
+
priority=["pyav"],
|
| 1270 |
+
),
|
| 1271 |
+
FileExtension(
|
| 1272 |
+
name="KUX (YouKu)",
|
| 1273 |
+
extension=".kux",
|
| 1274 |
+
priority=["pyav"],
|
| 1275 |
+
),
|
| 1276 |
+
FileExtension(
|
| 1277 |
+
name="live RTMP FLV (Flash Video)",
|
| 1278 |
+
extension=".flv",
|
| 1279 |
+
priority=["pyav"],
|
| 1280 |
+
),
|
| 1281 |
+
FileExtension(
|
| 1282 |
+
name="Loki SDL MJPEG",
|
| 1283 |
+
extension=".mjpg",
|
| 1284 |
+
priority=["pyav"],
|
| 1285 |
+
),
|
| 1286 |
+
FileExtension(
|
| 1287 |
+
name="LVF",
|
| 1288 |
+
extension=".lvf",
|
| 1289 |
+
priority=["pyav"],
|
| 1290 |
+
),
|
| 1291 |
+
FileExtension(
|
| 1292 |
+
name="Matroska / WebM",
|
| 1293 |
+
extension=".mk3d",
|
| 1294 |
+
priority=["pyav"],
|
| 1295 |
+
),
|
| 1296 |
+
FileExtension(
|
| 1297 |
+
name="Matroska / WebM",
|
| 1298 |
+
extension=".mka",
|
| 1299 |
+
priority=["pyav"],
|
| 1300 |
+
),
|
| 1301 |
+
FileExtension(
|
| 1302 |
+
name="Matroska / WebM",
|
| 1303 |
+
extension=".mks",
|
| 1304 |
+
priority=["pyav"],
|
| 1305 |
+
),
|
| 1306 |
+
FileExtension(
|
| 1307 |
+
name="Microsoft XMV",
|
| 1308 |
+
extension=".xmv",
|
| 1309 |
+
priority=["pyav"],
|
| 1310 |
+
),
|
| 1311 |
+
FileExtension(
|
| 1312 |
+
name="MIME multipart JPEG",
|
| 1313 |
+
extension=".mjpg",
|
| 1314 |
+
priority=["pyav"],
|
| 1315 |
+
),
|
| 1316 |
+
FileExtension(
|
| 1317 |
+
name="MobiClip MODS",
|
| 1318 |
+
extension=".mods",
|
| 1319 |
+
priority=["pyav"],
|
| 1320 |
+
),
|
| 1321 |
+
FileExtension(
|
| 1322 |
+
name="MobiClip MOFLEX",
|
| 1323 |
+
extension=".moflex",
|
| 1324 |
+
priority=["pyav"],
|
| 1325 |
+
),
|
| 1326 |
+
FileExtension(
|
| 1327 |
+
name="Motion Pixels MVI",
|
| 1328 |
+
extension=".mvi",
|
| 1329 |
+
priority=["pyav"],
|
| 1330 |
+
),
|
| 1331 |
+
FileExtension(
|
| 1332 |
+
name="MP4 (MPEG-4 Part 14)",
|
| 1333 |
+
extension=".3g2",
|
| 1334 |
+
priority=["pyav"],
|
| 1335 |
+
),
|
| 1336 |
+
FileExtension(
|
| 1337 |
+
name="MP4 (MPEG-4 Part 14)",
|
| 1338 |
+
extension=".3gp",
|
| 1339 |
+
priority=["pyav"],
|
| 1340 |
+
),
|
| 1341 |
+
FileExtension(
|
| 1342 |
+
name="MP4 (MPEG-4 Part 14)",
|
| 1343 |
+
extension=".f4v",
|
| 1344 |
+
priority=["pyav"],
|
| 1345 |
+
),
|
| 1346 |
+
FileExtension(
|
| 1347 |
+
name="MP4 (MPEG-4 Part 14)",
|
| 1348 |
+
extension=".ism",
|
| 1349 |
+
priority=["pyav"],
|
| 1350 |
+
),
|
| 1351 |
+
FileExtension(
|
| 1352 |
+
name="MP4 (MPEG-4 Part 14)",
|
| 1353 |
+
extension=".isma",
|
| 1354 |
+
priority=["pyav"],
|
| 1355 |
+
),
|
| 1356 |
+
FileExtension(
|
| 1357 |
+
name="MP4 (MPEG-4 Part 14)",
|
| 1358 |
+
extension=".ismv",
|
| 1359 |
+
priority=["pyav"],
|
| 1360 |
+
),
|
| 1361 |
+
FileExtension(
|
| 1362 |
+
name="MP4 (MPEG-4 Part 14)",
|
| 1363 |
+
extension=".m4a",
|
| 1364 |
+
priority=["pyav"],
|
| 1365 |
+
),
|
| 1366 |
+
FileExtension(
|
| 1367 |
+
name="MP4 (MPEG-4 Part 14)",
|
| 1368 |
+
extension=".m4b",
|
| 1369 |
+
priority=["pyav"],
|
| 1370 |
+
),
|
| 1371 |
+
FileExtension(
|
| 1372 |
+
name="MP4 (MPEG-4 Part 14)",
|
| 1373 |
+
extension=".mj2",
|
| 1374 |
+
priority=["pyav"],
|
| 1375 |
+
),
|
| 1376 |
+
FileExtension(
|
| 1377 |
+
name="MP4 (MPEG-4 Part 14)",
|
| 1378 |
+
extension=".psp",
|
| 1379 |
+
priority=["pyav"],
|
| 1380 |
+
),
|
| 1381 |
+
FileExtension(
|
| 1382 |
+
name="MPEG-2 PS (DVD VOB)",
|
| 1383 |
+
extension=".dvd",
|
| 1384 |
+
priority=["pyav"],
|
| 1385 |
+
),
|
| 1386 |
+
FileExtension(
|
| 1387 |
+
name="MPEG-2 PS (SVCD)",
|
| 1388 |
+
extension=".vob",
|
| 1389 |
+
priority=["pyav"],
|
| 1390 |
+
),
|
| 1391 |
+
FileExtension(
|
| 1392 |
+
name="MPEG-2 PS (VOB)",
|
| 1393 |
+
extension=".vob",
|
| 1394 |
+
priority=["pyav"],
|
| 1395 |
+
),
|
| 1396 |
+
FileExtension(
|
| 1397 |
+
name="MPEG-TS (MPEG-2 Transport Stream)",
|
| 1398 |
+
extension=".m2t",
|
| 1399 |
+
priority=["pyav"],
|
| 1400 |
+
),
|
| 1401 |
+
FileExtension(
|
| 1402 |
+
name="MPEG-TS (MPEG-2 Transport Stream)",
|
| 1403 |
+
extension=".m2ts",
|
| 1404 |
+
priority=["pyav"],
|
| 1405 |
+
),
|
| 1406 |
+
FileExtension(
|
| 1407 |
+
name="MPEG-TS (MPEG-2 Transport Stream)",
|
| 1408 |
+
extension=".mts",
|
| 1409 |
+
priority=["pyav"],
|
| 1410 |
+
),
|
| 1411 |
+
FileExtension(
|
| 1412 |
+
name="MPEG-TS (MPEG-2 Transport Stream)",
|
| 1413 |
+
extension=".ts",
|
| 1414 |
+
priority=["pyav"],
|
| 1415 |
+
),
|
| 1416 |
+
FileExtension(
|
| 1417 |
+
name="Musepack",
|
| 1418 |
+
extension=".mpc",
|
| 1419 |
+
priority=["pyav"],
|
| 1420 |
+
),
|
| 1421 |
+
FileExtension(
|
| 1422 |
+
name="MXF (Material eXchange Format) Operational Pattern Atom",
|
| 1423 |
+
extension=".mxf",
|
| 1424 |
+
priority=["pyav"],
|
| 1425 |
+
),
|
| 1426 |
+
FileExtension(
|
| 1427 |
+
name="MXF (Material eXchange Format)",
|
| 1428 |
+
extension=".mxf",
|
| 1429 |
+
priority=["pyav"],
|
| 1430 |
+
),
|
| 1431 |
+
FileExtension(
|
| 1432 |
+
name="MxPEG clip",
|
| 1433 |
+
extension=".mxg",
|
| 1434 |
+
priority=["pyav"],
|
| 1435 |
+
),
|
| 1436 |
+
FileExtension(
|
| 1437 |
+
name="NC camera feed",
|
| 1438 |
+
extension=".v",
|
| 1439 |
+
priority=["pyav"],
|
| 1440 |
+
),
|
| 1441 |
+
FileExtension(
|
| 1442 |
+
name="NUT",
|
| 1443 |
+
extension=".nut",
|
| 1444 |
+
priority=["pyav"],
|
| 1445 |
+
),
|
| 1446 |
+
FileExtension(
|
| 1447 |
+
name="Ogg Video",
|
| 1448 |
+
extension=".ogv",
|
| 1449 |
+
priority=["pyav"],
|
| 1450 |
+
),
|
| 1451 |
+
FileExtension(
|
| 1452 |
+
name="Ogg",
|
| 1453 |
+
extension=".ogg",
|
| 1454 |
+
priority=["pyav"],
|
| 1455 |
+
),
|
| 1456 |
+
FileExtension(
|
| 1457 |
+
name="On2 IVF",
|
| 1458 |
+
extension=".ivf",
|
| 1459 |
+
priority=["pyav"],
|
| 1460 |
+
),
|
| 1461 |
+
FileExtension(
|
| 1462 |
+
name="PSP MP4 (MPEG-4 Part 14)",
|
| 1463 |
+
extension=".psp",
|
| 1464 |
+
priority=["pyav"],
|
| 1465 |
+
),
|
| 1466 |
+
FileExtension(
|
| 1467 |
+
name="Psygnosis YOP",
|
| 1468 |
+
extension=".yop",
|
| 1469 |
+
priority=["pyav"],
|
| 1470 |
+
),
|
| 1471 |
+
FileExtension(
|
| 1472 |
+
name="QuickTime / MOV",
|
| 1473 |
+
extension=".3g2",
|
| 1474 |
+
priority=["pyav"],
|
| 1475 |
+
),
|
| 1476 |
+
FileExtension(
|
| 1477 |
+
name="QuickTime / MOV",
|
| 1478 |
+
extension=".3gp",
|
| 1479 |
+
priority=["pyav"],
|
| 1480 |
+
),
|
| 1481 |
+
FileExtension(
|
| 1482 |
+
name="QuickTime / MOV",
|
| 1483 |
+
extension=".f4v",
|
| 1484 |
+
priority=["pyav"],
|
| 1485 |
+
),
|
| 1486 |
+
FileExtension(
|
| 1487 |
+
name="QuickTime / MOV",
|
| 1488 |
+
extension=".ism",
|
| 1489 |
+
priority=["pyav"],
|
| 1490 |
+
),
|
| 1491 |
+
FileExtension(
|
| 1492 |
+
name="QuickTime / MOV",
|
| 1493 |
+
extension=".isma",
|
| 1494 |
+
priority=["pyav"],
|
| 1495 |
+
),
|
| 1496 |
+
FileExtension(
|
| 1497 |
+
name="QuickTime / MOV",
|
| 1498 |
+
extension=".ismv",
|
| 1499 |
+
priority=["pyav"],
|
| 1500 |
+
),
|
| 1501 |
+
FileExtension(
|
| 1502 |
+
name="QuickTime / MOV",
|
| 1503 |
+
extension=".m4a",
|
| 1504 |
+
priority=["pyav"],
|
| 1505 |
+
),
|
| 1506 |
+
FileExtension(
|
| 1507 |
+
name="QuickTime / MOV",
|
| 1508 |
+
extension=".m4b",
|
| 1509 |
+
priority=["pyav"],
|
| 1510 |
+
),
|
| 1511 |
+
FileExtension(
|
| 1512 |
+
name="QuickTime / MOV",
|
| 1513 |
+
extension=".mj2",
|
| 1514 |
+
priority=["pyav"],
|
| 1515 |
+
),
|
| 1516 |
+
FileExtension(
|
| 1517 |
+
name="QuickTime / MOV",
|
| 1518 |
+
extension=".psp",
|
| 1519 |
+
priority=["pyav"],
|
| 1520 |
+
),
|
| 1521 |
+
FileExtension(
|
| 1522 |
+
name="raw AVS2-P2/IEEE1857.4 video",
|
| 1523 |
+
extension=".avs",
|
| 1524 |
+
priority=["pyav"],
|
| 1525 |
+
),
|
| 1526 |
+
FileExtension(
|
| 1527 |
+
name="raw AVS2-P2/IEEE1857.4 video",
|
| 1528 |
+
extension=".avs2",
|
| 1529 |
+
priority=["pyav"],
|
| 1530 |
+
),
|
| 1531 |
+
FileExtension(
|
| 1532 |
+
name="raw AVS3-P2/IEEE1857.10",
|
| 1533 |
+
extension=".avs3",
|
| 1534 |
+
priority=["pyav"],
|
| 1535 |
+
),
|
| 1536 |
+
FileExtension(
|
| 1537 |
+
name="raw Chinese AVS (Audio Video Standard) video",
|
| 1538 |
+
extension=".cavs",
|
| 1539 |
+
priority=["pyav"],
|
| 1540 |
+
),
|
| 1541 |
+
FileExtension(
|
| 1542 |
+
name="raw Dirac",
|
| 1543 |
+
extension=".drc",
|
| 1544 |
+
priority=["pyav"],
|
| 1545 |
+
),
|
| 1546 |
+
FileExtension(
|
| 1547 |
+
name="raw Dirac",
|
| 1548 |
+
extension=".vc2",
|
| 1549 |
+
priority=["pyav"],
|
| 1550 |
+
),
|
| 1551 |
+
FileExtension(
|
| 1552 |
+
name="raw DNxHD (SMPTE VC-3)",
|
| 1553 |
+
extension=".dnxhd",
|
| 1554 |
+
priority=["pyav"],
|
| 1555 |
+
),
|
| 1556 |
+
FileExtension(
|
| 1557 |
+
name="raw DNxHD (SMPTE VC-3)",
|
| 1558 |
+
extension=".dnxhr",
|
| 1559 |
+
priority=["pyav"],
|
| 1560 |
+
),
|
| 1561 |
+
FileExtension(
|
| 1562 |
+
name="raw GSM",
|
| 1563 |
+
extension=".gsm",
|
| 1564 |
+
priority=["pyav"],
|
| 1565 |
+
),
|
| 1566 |
+
FileExtension(
|
| 1567 |
+
name="raw H.261",
|
| 1568 |
+
extension=".h261",
|
| 1569 |
+
priority=["pyav"],
|
| 1570 |
+
),
|
| 1571 |
+
FileExtension(
|
| 1572 |
+
name="raw H.263",
|
| 1573 |
+
extension=".h263",
|
| 1574 |
+
priority=["pyav"],
|
| 1575 |
+
),
|
| 1576 |
+
FileExtension(
|
| 1577 |
+
name="raw H.264 video",
|
| 1578 |
+
extension=".264",
|
| 1579 |
+
priority=["pyav"],
|
| 1580 |
+
),
|
| 1581 |
+
FileExtension(
|
| 1582 |
+
name="raw H.264 video",
|
| 1583 |
+
extension=".avc",
|
| 1584 |
+
priority=["pyav"],
|
| 1585 |
+
),
|
| 1586 |
+
FileExtension(
|
| 1587 |
+
name="raw H.264 video",
|
| 1588 |
+
extension=".h264",
|
| 1589 |
+
priority=["pyav", "FFMPEG"],
|
| 1590 |
+
),
|
| 1591 |
+
FileExtension(
|
| 1592 |
+
name="raw H.264 video",
|
| 1593 |
+
extension=".h26l",
|
| 1594 |
+
priority=["pyav"],
|
| 1595 |
+
),
|
| 1596 |
+
FileExtension(
|
| 1597 |
+
name="raw HEVC video",
|
| 1598 |
+
extension=".265",
|
| 1599 |
+
priority=["pyav"],
|
| 1600 |
+
),
|
| 1601 |
+
FileExtension(
|
| 1602 |
+
name="raw HEVC video",
|
| 1603 |
+
extension=".h265",
|
| 1604 |
+
priority=["pyav"],
|
| 1605 |
+
),
|
| 1606 |
+
FileExtension(
|
| 1607 |
+
name="raw HEVC video",
|
| 1608 |
+
extension=".hevc",
|
| 1609 |
+
priority=["pyav"],
|
| 1610 |
+
),
|
| 1611 |
+
FileExtension(
|
| 1612 |
+
name="raw id RoQ",
|
| 1613 |
+
extension=".roq",
|
| 1614 |
+
priority=["pyav"],
|
| 1615 |
+
),
|
| 1616 |
+
FileExtension(
|
| 1617 |
+
name="raw Ingenient MJPEG",
|
| 1618 |
+
extension=".cgi",
|
| 1619 |
+
priority=["pyav"],
|
| 1620 |
+
),
|
| 1621 |
+
FileExtension(
|
| 1622 |
+
name="raw IPU Video",
|
| 1623 |
+
extension=".ipu",
|
| 1624 |
+
priority=["pyav"],
|
| 1625 |
+
),
|
| 1626 |
+
FileExtension(
|
| 1627 |
+
name="raw MJPEG 2000 video",
|
| 1628 |
+
extension=".j2k",
|
| 1629 |
+
priority=["pyav"],
|
| 1630 |
+
),
|
| 1631 |
+
FileExtension(
|
| 1632 |
+
name="raw MJPEG video",
|
| 1633 |
+
extension=".mjpeg",
|
| 1634 |
+
priority=["pyav"],
|
| 1635 |
+
),
|
| 1636 |
+
FileExtension(
|
| 1637 |
+
name="raw MJPEG video",
|
| 1638 |
+
extension=".mjpg",
|
| 1639 |
+
priority=["pyav"],
|
| 1640 |
+
),
|
| 1641 |
+
FileExtension(
|
| 1642 |
+
name="raw MJPEG video",
|
| 1643 |
+
extension=".mpo",
|
| 1644 |
+
priority=["pyav"],
|
| 1645 |
+
),
|
| 1646 |
+
FileExtension(
|
| 1647 |
+
name="raw MPEG-1 video",
|
| 1648 |
+
extension=".m1v",
|
| 1649 |
+
priority=["pyav"],
|
| 1650 |
+
),
|
| 1651 |
+
FileExtension(
|
| 1652 |
+
name="raw MPEG-1 video",
|
| 1653 |
+
extension=".mpeg",
|
| 1654 |
+
priority=["pyav"],
|
| 1655 |
+
),
|
| 1656 |
+
FileExtension(
|
| 1657 |
+
name="raw MPEG-1 video",
|
| 1658 |
+
extension=".mpg",
|
| 1659 |
+
priority=["pyav"],
|
| 1660 |
+
),
|
| 1661 |
+
FileExtension(
|
| 1662 |
+
name="raw MPEG-2 video",
|
| 1663 |
+
extension=".m2v",
|
| 1664 |
+
priority=["pyav"],
|
| 1665 |
+
),
|
| 1666 |
+
FileExtension(
|
| 1667 |
+
name="raw MPEG-4 video",
|
| 1668 |
+
extension=".m4v",
|
| 1669 |
+
priority=["pyav"],
|
| 1670 |
+
),
|
| 1671 |
+
FileExtension(
|
| 1672 |
+
name="raw VC-1 video",
|
| 1673 |
+
extension=".vc1",
|
| 1674 |
+
priority=["pyav"],
|
| 1675 |
+
),
|
| 1676 |
+
FileExtension(
|
| 1677 |
+
name="raw video",
|
| 1678 |
+
extension=".cif",
|
| 1679 |
+
priority=["pyav"],
|
| 1680 |
+
),
|
| 1681 |
+
FileExtension(
|
| 1682 |
+
name="raw video",
|
| 1683 |
+
extension=".qcif",
|
| 1684 |
+
priority=["pyav"],
|
| 1685 |
+
),
|
| 1686 |
+
FileExtension(
|
| 1687 |
+
name="raw video",
|
| 1688 |
+
extension=".rgb",
|
| 1689 |
+
priority=["pyav"],
|
| 1690 |
+
),
|
| 1691 |
+
FileExtension(
|
| 1692 |
+
name="raw video",
|
| 1693 |
+
extension=".yuv",
|
| 1694 |
+
priority=["pyav"],
|
| 1695 |
+
),
|
| 1696 |
+
FileExtension(
|
| 1697 |
+
name="RealMedia",
|
| 1698 |
+
extension=".rm",
|
| 1699 |
+
priority=["pyav"],
|
| 1700 |
+
),
|
| 1701 |
+
FileExtension(
|
| 1702 |
+
name="SDR2",
|
| 1703 |
+
extension=".sdr2",
|
| 1704 |
+
priority=["pyav"],
|
| 1705 |
+
),
|
| 1706 |
+
FileExtension(
|
| 1707 |
+
name="Sega FILM / CPK",
|
| 1708 |
+
extension=".cpk",
|
| 1709 |
+
priority=["pyav"],
|
| 1710 |
+
),
|
| 1711 |
+
FileExtension(
|
| 1712 |
+
name="SER (Simple uncompressed video format for astronomical capturing)",
|
| 1713 |
+
extension=".ser",
|
| 1714 |
+
priority=["pyav"],
|
| 1715 |
+
),
|
| 1716 |
+
FileExtension(
|
| 1717 |
+
name="Simbiosis Interactive IMX",
|
| 1718 |
+
extension=".imx",
|
| 1719 |
+
priority=["pyav"],
|
| 1720 |
+
),
|
| 1721 |
+
FileExtension(
|
| 1722 |
+
name="Square SVS",
|
| 1723 |
+
extension=".svs",
|
| 1724 |
+
priority=["tifffile", "pyav"],
|
| 1725 |
+
),
|
| 1726 |
+
FileExtension(
|
| 1727 |
+
name="TiVo TY Stream",
|
| 1728 |
+
extension=".ty",
|
| 1729 |
+
priority=["pyav"],
|
| 1730 |
+
),
|
| 1731 |
+
FileExtension(
|
| 1732 |
+
name="TiVo TY Stream",
|
| 1733 |
+
extension=".ty+",
|
| 1734 |
+
priority=["pyav"],
|
| 1735 |
+
),
|
| 1736 |
+
FileExtension(
|
| 1737 |
+
name="Uncompressed 4:2:2 10-bit",
|
| 1738 |
+
extension=".v210",
|
| 1739 |
+
priority=["pyav"],
|
| 1740 |
+
),
|
| 1741 |
+
FileExtension(
|
| 1742 |
+
name="Uncompressed 4:2:2 10-bit",
|
| 1743 |
+
extension=".yuv10",
|
| 1744 |
+
priority=["pyav"],
|
| 1745 |
+
),
|
| 1746 |
+
FileExtension(
|
| 1747 |
+
name="VC-1 test bitstream",
|
| 1748 |
+
extension=".rcv",
|
| 1749 |
+
priority=["pyav"],
|
| 1750 |
+
),
|
| 1751 |
+
FileExtension(
|
| 1752 |
+
name="Video CCTV DAT",
|
| 1753 |
+
extension=".dat",
|
| 1754 |
+
priority=["pyav"],
|
| 1755 |
+
),
|
| 1756 |
+
FileExtension(
|
| 1757 |
+
name="Video DAV",
|
| 1758 |
+
extension=".dav",
|
| 1759 |
+
priority=["pyav"],
|
| 1760 |
+
),
|
| 1761 |
+
FileExtension(
|
| 1762 |
+
name="Vivo",
|
| 1763 |
+
extension=".viv",
|
| 1764 |
+
priority=["pyav"],
|
| 1765 |
+
),
|
| 1766 |
+
FileExtension(
|
| 1767 |
+
name="WebM Chunk Muxer",
|
| 1768 |
+
extension=".chk",
|
| 1769 |
+
priority=["pyav"],
|
| 1770 |
+
),
|
| 1771 |
+
FileExtension(
|
| 1772 |
+
name="WebM",
|
| 1773 |
+
extension=".mk3d",
|
| 1774 |
+
priority=["pyav"],
|
| 1775 |
+
),
|
| 1776 |
+
FileExtension(
|
| 1777 |
+
name="WebM",
|
| 1778 |
+
extension=".mka",
|
| 1779 |
+
priority=["pyav"],
|
| 1780 |
+
),
|
| 1781 |
+
FileExtension(
|
| 1782 |
+
name="WebM",
|
| 1783 |
+
extension=".mks",
|
| 1784 |
+
priority=["pyav"],
|
| 1785 |
+
),
|
| 1786 |
+
FileExtension(
|
| 1787 |
+
name="Windows Television (WTV)",
|
| 1788 |
+
extension=".wtv",
|
| 1789 |
+
priority=["pyav"],
|
| 1790 |
+
),
|
| 1791 |
+
FileExtension(
|
| 1792 |
+
name="Xilam DERF",
|
| 1793 |
+
extension=".adp",
|
| 1794 |
+
priority=["pyav"],
|
| 1795 |
+
),
|
| 1796 |
+
FileExtension(
|
| 1797 |
+
name="YUV4MPEG pipe",
|
| 1798 |
+
extension=".y4m",
|
| 1799 |
+
priority=["pyav"],
|
| 1800 |
+
),
|
| 1801 |
+
FileExtension(
|
| 1802 |
+
extension=".qpi",
|
| 1803 |
+
priority=["tifffile"],
|
| 1804 |
+
),
|
| 1805 |
+
FileExtension(
|
| 1806 |
+
name="PCO Camera",
|
| 1807 |
+
extension=".pcoraw",
|
| 1808 |
+
priority=["tifffile"],
|
| 1809 |
+
),
|
| 1810 |
+
FileExtension(
|
| 1811 |
+
name="PCO Camera",
|
| 1812 |
+
extension=".rec",
|
| 1813 |
+
priority=["tifffile"],
|
| 1814 |
+
),
|
| 1815 |
+
FileExtension(
|
| 1816 |
+
name="Perkin Elmer Vectra",
|
| 1817 |
+
extension=".qptiff",
|
| 1818 |
+
priority=["tifffile"],
|
| 1819 |
+
),
|
| 1820 |
+
FileExtension(
|
| 1821 |
+
name="Pyramid Encoded TIFF",
|
| 1822 |
+
extension=".ptiff",
|
| 1823 |
+
priority=["tifffile"],
|
| 1824 |
+
),
|
| 1825 |
+
FileExtension(
|
| 1826 |
+
name="Pyramid Encoded TIFF",
|
| 1827 |
+
extension=".ptif",
|
| 1828 |
+
priority=["tifffile"],
|
| 1829 |
+
),
|
| 1830 |
+
FileExtension(
|
| 1831 |
+
name="Opticks Gel",
|
| 1832 |
+
extension=".gel",
|
| 1833 |
+
priority=["tifffile"],
|
| 1834 |
+
),
|
| 1835 |
+
FileExtension(
|
| 1836 |
+
name="Zoomify Image Format",
|
| 1837 |
+
extension=".zif",
|
| 1838 |
+
priority=["tifffile"],
|
| 1839 |
+
),
|
| 1840 |
+
FileExtension(
|
| 1841 |
+
name="Hamamatsu Slide Scanner",
|
| 1842 |
+
extension=".ndpi",
|
| 1843 |
+
priority=["tifffile"],
|
| 1844 |
+
),
|
| 1845 |
+
FileExtension(
|
| 1846 |
+
name="Roche Digital Pathology",
|
| 1847 |
+
extension=".bif",
|
| 1848 |
+
priority=["tifffile"],
|
| 1849 |
+
),
|
| 1850 |
+
FileExtension(
|
| 1851 |
+
extension=".tf8",
|
| 1852 |
+
priority=["tifffile"],
|
| 1853 |
+
),
|
| 1854 |
+
FileExtension(
|
| 1855 |
+
extension=".btf",
|
| 1856 |
+
priority=["tifffile"],
|
| 1857 |
+
),
|
| 1858 |
+
FileExtension(
|
| 1859 |
+
name="High Efficiency Image File Format",
|
| 1860 |
+
extension=".heic",
|
| 1861 |
+
priority=["pillow"],
|
| 1862 |
+
),
|
| 1863 |
+
FileExtension(
|
| 1864 |
+
name="AV1 Image File Format",
|
| 1865 |
+
extension=".avif",
|
| 1866 |
+
priority=["pillow"],
|
| 1867 |
+
),
|
| 1868 |
+
]
|
| 1869 |
+
extension_list.sort(key=lambda x: x.extension)
|
| 1870 |
+
|
| 1871 |
+
|
| 1872 |
+
known_extensions = dict()
|
| 1873 |
+
for ext in extension_list:
|
| 1874 |
+
if ext.extension not in known_extensions:
|
| 1875 |
+
known_extensions[ext.extension] = list()
|
| 1876 |
+
known_extensions[ext.extension].append(ext)
|
| 1877 |
+
|
| 1878 |
+
extension_list = [ext for ext_list in known_extensions.values() for ext in ext_list]
|
| 1879 |
+
|
| 1880 |
+
_video_extension_strings = [
|
| 1881 |
+
".264",
|
| 1882 |
+
".265",
|
| 1883 |
+
".3g2",
|
| 1884 |
+
".3gp",
|
| 1885 |
+
".a64",
|
| 1886 |
+
".A64",
|
| 1887 |
+
".adp",
|
| 1888 |
+
".amr",
|
| 1889 |
+
".amv",
|
| 1890 |
+
".asf",
|
| 1891 |
+
".avc",
|
| 1892 |
+
".avi",
|
| 1893 |
+
".avr",
|
| 1894 |
+
".avs",
|
| 1895 |
+
".avs2",
|
| 1896 |
+
".avs3",
|
| 1897 |
+
".bmv",
|
| 1898 |
+
".cavs",
|
| 1899 |
+
".cdg",
|
| 1900 |
+
".cdxl",
|
| 1901 |
+
".cgi",
|
| 1902 |
+
".chk",
|
| 1903 |
+
".cif",
|
| 1904 |
+
".cpk",
|
| 1905 |
+
".dat",
|
| 1906 |
+
".dav",
|
| 1907 |
+
".dif",
|
| 1908 |
+
".dnxhd",
|
| 1909 |
+
".dnxhr",
|
| 1910 |
+
".drc",
|
| 1911 |
+
".dv",
|
| 1912 |
+
".dvd",
|
| 1913 |
+
".f4v",
|
| 1914 |
+
".flm",
|
| 1915 |
+
".flv",
|
| 1916 |
+
".gsm",
|
| 1917 |
+
".gxf",
|
| 1918 |
+
".h261",
|
| 1919 |
+
".h263",
|
| 1920 |
+
".h264",
|
| 1921 |
+
".h265",
|
| 1922 |
+
".h26l",
|
| 1923 |
+
".hevc",
|
| 1924 |
+
".idf",
|
| 1925 |
+
".ifv",
|
| 1926 |
+
".imx",
|
| 1927 |
+
".ipu",
|
| 1928 |
+
".ism",
|
| 1929 |
+
".isma",
|
| 1930 |
+
".ismv",
|
| 1931 |
+
".ivf",
|
| 1932 |
+
".ivr",
|
| 1933 |
+
".j2k",
|
| 1934 |
+
".kux",
|
| 1935 |
+
".lvf",
|
| 1936 |
+
".m1v",
|
| 1937 |
+
".m2t",
|
| 1938 |
+
".m2ts",
|
| 1939 |
+
".m2v",
|
| 1940 |
+
".m4a",
|
| 1941 |
+
".m4b",
|
| 1942 |
+
".m4v",
|
| 1943 |
+
".mj2",
|
| 1944 |
+
".mjpeg",
|
| 1945 |
+
".mjpg",
|
| 1946 |
+
".mk3d",
|
| 1947 |
+
".mka",
|
| 1948 |
+
".mks",
|
| 1949 |
+
".mkv",
|
| 1950 |
+
".mods",
|
| 1951 |
+
".moflex",
|
| 1952 |
+
".mov",
|
| 1953 |
+
".mp4",
|
| 1954 |
+
".mpc",
|
| 1955 |
+
".mpd",
|
| 1956 |
+
".mpeg",
|
| 1957 |
+
".mpg",
|
| 1958 |
+
".mpo",
|
| 1959 |
+
".mts",
|
| 1960 |
+
".mvi",
|
| 1961 |
+
".mxf",
|
| 1962 |
+
".mxg",
|
| 1963 |
+
".nut",
|
| 1964 |
+
".obu",
|
| 1965 |
+
".ogg",
|
| 1966 |
+
".ogv",
|
| 1967 |
+
".psp",
|
| 1968 |
+
".qcif",
|
| 1969 |
+
".rcv",
|
| 1970 |
+
".rgb",
|
| 1971 |
+
".rm",
|
| 1972 |
+
".roq",
|
| 1973 |
+
".sdr2",
|
| 1974 |
+
".ser",
|
| 1975 |
+
".sga",
|
| 1976 |
+
".svag",
|
| 1977 |
+
".svs",
|
| 1978 |
+
".ts",
|
| 1979 |
+
".ty",
|
| 1980 |
+
".ty+",
|
| 1981 |
+
".v",
|
| 1982 |
+
".v210",
|
| 1983 |
+
".vb",
|
| 1984 |
+
".vc1",
|
| 1985 |
+
".vc2",
|
| 1986 |
+
".viv",
|
| 1987 |
+
".vob",
|
| 1988 |
+
".webm",
|
| 1989 |
+
".wmv",
|
| 1990 |
+
".wtv",
|
| 1991 |
+
".xl",
|
| 1992 |
+
".xmv",
|
| 1993 |
+
".y4m",
|
| 1994 |
+
".yop",
|
| 1995 |
+
".yuv",
|
| 1996 |
+
".yuv10",
|
| 1997 |
+
]
|
| 1998 |
+
video_extensions = list()
|
| 1999 |
+
for ext_string in _video_extension_strings:
|
| 2000 |
+
formats = known_extensions[ext_string]
|
| 2001 |
+
video_extensions.append(formats[0])
|
| 2002 |
+
video_extensions.sort(key=lambda x: x.extension)
|
parrot/lib/python3.10/site-packages/imageio/config/extensions.pyi
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Dict, Optional
|
| 2 |
+
|
| 3 |
+
class FileExtension:
|
| 4 |
+
extension: str
|
| 5 |
+
priority: List[str]
|
| 6 |
+
name: Optional[str] = None
|
| 7 |
+
description: Optional[str] = None
|
| 8 |
+
external_link: Optional[str] = None
|
| 9 |
+
volume_support: bool
|
| 10 |
+
|
| 11 |
+
def __init__(
|
| 12 |
+
self,
|
| 13 |
+
*,
|
| 14 |
+
extension: str,
|
| 15 |
+
priority: List[str],
|
| 16 |
+
name: str = None,
|
| 17 |
+
description: str = None,
|
| 18 |
+
external_link: str = None
|
| 19 |
+
) -> None: ...
|
| 20 |
+
def reset(self) -> None: ...
|
| 21 |
+
|
| 22 |
+
extension_list: List[FileExtension]
|
| 23 |
+
known_extensions: Dict[str, List[FileExtension]]
|
| 24 |
+
video_extensions: List[FileExtension]
|
parrot/lib/python3.10/site-packages/imageio/config/plugins.py
ADDED
|
@@ -0,0 +1,782 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import importlib
|
| 2 |
+
|
| 3 |
+
from ..core.legacy_plugin_wrapper import LegacyPlugin
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class PluginConfig:
|
| 7 |
+
"""Plugin Configuration Metadata
|
| 8 |
+
|
| 9 |
+
This class holds the information needed to lazy-import plugins.
|
| 10 |
+
|
| 11 |
+
Parameters
|
| 12 |
+
----------
|
| 13 |
+
name : str
|
| 14 |
+
The name of the plugin.
|
| 15 |
+
class_name : str
|
| 16 |
+
The name of the plugin class inside the plugin module.
|
| 17 |
+
module_name : str
|
| 18 |
+
The name of the module/package from which to import the plugin.
|
| 19 |
+
is_legacy : bool
|
| 20 |
+
If True, this plugin is a v2 plugin and will be wrapped in a
|
| 21 |
+
LegacyPlugin. Default: False.
|
| 22 |
+
package_name : str
|
| 23 |
+
If the given module name points to a relative module, then the package
|
| 24 |
+
name determines the package it is relative to.
|
| 25 |
+
install_name : str
|
| 26 |
+
The name of the optional dependency that can be used to install this
|
| 27 |
+
plugin if it is missing.
|
| 28 |
+
legacy_args : Dict
|
| 29 |
+
A dictionary of kwargs to pass to the v2 plugin (Format) upon construction.
|
| 30 |
+
|
| 31 |
+
Examples
|
| 32 |
+
--------
|
| 33 |
+
>>> PluginConfig(
|
| 34 |
+
name="TIFF",
|
| 35 |
+
class_name="TiffFormat",
|
| 36 |
+
module_name="imageio.plugins.tifffile",
|
| 37 |
+
is_legacy=True,
|
| 38 |
+
install_name="tifffile",
|
| 39 |
+
legacy_args={
|
| 40 |
+
"description": "TIFF format",
|
| 41 |
+
"extensions": ".tif .tiff .stk .lsm",
|
| 42 |
+
"modes": "iIvV",
|
| 43 |
+
},
|
| 44 |
+
)
|
| 45 |
+
>>> PluginConfig(
|
| 46 |
+
name="pillow",
|
| 47 |
+
class_name="PillowPlugin",
|
| 48 |
+
module_name="imageio.plugins.pillow"
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
"""
|
| 52 |
+
|
| 53 |
+
def __init__(
|
| 54 |
+
self,
|
| 55 |
+
name,
|
| 56 |
+
class_name,
|
| 57 |
+
module_name,
|
| 58 |
+
*,
|
| 59 |
+
is_legacy=False,
|
| 60 |
+
package_name=None,
|
| 61 |
+
install_name=None,
|
| 62 |
+
legacy_args=None,
|
| 63 |
+
):
|
| 64 |
+
legacy_args = legacy_args or dict()
|
| 65 |
+
|
| 66 |
+
self.name = name
|
| 67 |
+
self.class_name = class_name
|
| 68 |
+
self.module_name = module_name
|
| 69 |
+
self.package_name = package_name
|
| 70 |
+
|
| 71 |
+
self.is_legacy = is_legacy
|
| 72 |
+
self.install_name = install_name or self.name
|
| 73 |
+
self.legacy_args = {"name": name, "description": "A legacy plugin"}
|
| 74 |
+
self.legacy_args.update(legacy_args)
|
| 75 |
+
|
| 76 |
+
@property
|
| 77 |
+
def format(self):
|
| 78 |
+
"""For backwards compatibility with FormatManager
|
| 79 |
+
|
| 80 |
+
Delete when migrating to v3
|
| 81 |
+
"""
|
| 82 |
+
if not self.is_legacy:
|
| 83 |
+
raise RuntimeError("Can only get format for legacy plugins.")
|
| 84 |
+
|
| 85 |
+
module = importlib.import_module(self.module_name, self.package_name)
|
| 86 |
+
clazz = getattr(module, self.class_name)
|
| 87 |
+
return clazz(**self.legacy_args)
|
| 88 |
+
|
| 89 |
+
@property
|
| 90 |
+
def plugin_class(self):
|
| 91 |
+
"""Get the plugin class (import if needed)
|
| 92 |
+
|
| 93 |
+
Returns
|
| 94 |
+
-------
|
| 95 |
+
plugin_class : Any
|
| 96 |
+
The class that can be used to instantiate plugins.
|
| 97 |
+
|
| 98 |
+
"""
|
| 99 |
+
|
| 100 |
+
module = importlib.import_module(self.module_name, self.package_name)
|
| 101 |
+
clazz = getattr(module, self.class_name)
|
| 102 |
+
|
| 103 |
+
if self.is_legacy:
|
| 104 |
+
legacy_plugin = clazz(**self.legacy_args)
|
| 105 |
+
|
| 106 |
+
def partial_legacy_plugin(request):
|
| 107 |
+
return LegacyPlugin(request, legacy_plugin)
|
| 108 |
+
|
| 109 |
+
clazz = partial_legacy_plugin
|
| 110 |
+
|
| 111 |
+
return clazz
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
known_plugins = dict()
|
| 115 |
+
known_plugins["pillow"] = PluginConfig(
|
| 116 |
+
name="pillow", class_name="PillowPlugin", module_name="imageio.plugins.pillow"
|
| 117 |
+
)
|
| 118 |
+
known_plugins["pyav"] = PluginConfig(
|
| 119 |
+
name="pyav", class_name="PyAVPlugin", module_name="imageio.plugins.pyav"
|
| 120 |
+
)
|
| 121 |
+
known_plugins["opencv"] = PluginConfig(
|
| 122 |
+
name="opencv", class_name="OpenCVPlugin", module_name="imageio.plugins.opencv"
|
| 123 |
+
)
|
| 124 |
+
known_plugins["tifffile"] = PluginConfig(
|
| 125 |
+
name="tifffile",
|
| 126 |
+
class_name="TifffilePlugin",
|
| 127 |
+
module_name="imageio.plugins.tifffile_v3",
|
| 128 |
+
)
|
| 129 |
+
known_plugins["SPE"] = PluginConfig(
|
| 130 |
+
name="spe", class_name="SpePlugin", module_name="imageio.plugins.spe"
|
| 131 |
+
)
|
| 132 |
+
known_plugins["rawpy"] = PluginConfig(
|
| 133 |
+
name="rawpy", class_name="RawPyPlugin", module_name="imageio.plugins.rawpy"
|
| 134 |
+
)
|
| 135 |
+
|
| 136 |
+
# Legacy plugins
|
| 137 |
+
# ==============
|
| 138 |
+
#
|
| 139 |
+
# Which are partly registered by format, partly by plugin, and partly by a mix
|
| 140 |
+
# of both. We keep the naming here for backwards compatibility.
|
| 141 |
+
# In v3 this should become a single entry per plugin named after the plugin
|
| 142 |
+
# We can choose extension-specific priority in ``config.extensions``.
|
| 143 |
+
#
|
| 144 |
+
# Note: Since python 3.7 order of insertion determines the order of dict().keys()
|
| 145 |
+
# This means that the order here determines the order by which plugins are
|
| 146 |
+
# checked during the full fallback search. We don't advertise this downstream,
|
| 147 |
+
# but it could be a useful thing to keep in mind to choose a sensible default
|
| 148 |
+
# search order.
|
| 149 |
+
|
| 150 |
+
known_plugins["TIFF"] = PluginConfig(
|
| 151 |
+
name="TIFF",
|
| 152 |
+
class_name="TiffFormat",
|
| 153 |
+
module_name="imageio.plugins.tifffile",
|
| 154 |
+
is_legacy=True,
|
| 155 |
+
install_name="tifffile",
|
| 156 |
+
legacy_args={
|
| 157 |
+
"description": "TIFF format",
|
| 158 |
+
"extensions": ".tif .tiff .stk .lsm",
|
| 159 |
+
"modes": "iIvV",
|
| 160 |
+
},
|
| 161 |
+
)
|
| 162 |
+
|
| 163 |
+
# PILLOW plugin formats (legacy)
|
| 164 |
+
PILLOW_FORMATS = [
|
| 165 |
+
("BMP", "Windows Bitmap", ".bmp", "PillowFormat"),
|
| 166 |
+
("BUFR", "BUFR", ".bufr", "PillowFormat"),
|
| 167 |
+
("CUR", "Windows Cursor", ".cur", "PillowFormat"),
|
| 168 |
+
("DCX", "Intel DCX", ".dcx", "PillowFormat"),
|
| 169 |
+
("DDS", "DirectDraw Surface", ".dds", "PillowFormat"),
|
| 170 |
+
("DIB", "Windows Bitmap", "", "PillowFormat"),
|
| 171 |
+
("EPS", "Encapsulated Postscript", ".ps .eps", "PillowFormat"),
|
| 172 |
+
("FITS", "FITS", ".fit .fits", "PillowFormat"),
|
| 173 |
+
("FLI", "Autodesk FLI/FLC Animation", ".fli .flc", "PillowFormat"),
|
| 174 |
+
("FPX", "FlashPix", ".fpx", "PillowFormat"),
|
| 175 |
+
("FTEX", "Texture File Format (IW2:EOC)", ".ftc .ftu", "PillowFormat"),
|
| 176 |
+
("GBR", "GIMP brush file", ".gbr", "PillowFormat"),
|
| 177 |
+
("GIF", "Compuserve GIF", ".gif", "GIFFormat"),
|
| 178 |
+
("GRIB", "GRIB", ".grib", "PillowFormat"),
|
| 179 |
+
("HDF5", "HDF5", ".h5 .hdf", "PillowFormat"),
|
| 180 |
+
("ICNS", "Mac OS icns resource", ".icns", "PillowFormat"),
|
| 181 |
+
("ICO", "Windows Icon", ".ico", "PillowFormat"),
|
| 182 |
+
("IM", "IFUNC Image Memory", ".im", "PillowFormat"),
|
| 183 |
+
("IMT", "IM Tools", "", "PillowFormat"),
|
| 184 |
+
("IPTC", "IPTC/NAA", ".iim", "PillowFormat"),
|
| 185 |
+
("JPEG", "JPEG (ISO 10918)", ".jfif .jpe .jpg .jpeg", "JPEGFormat"),
|
| 186 |
+
(
|
| 187 |
+
"JPEG2000",
|
| 188 |
+
"JPEG 2000 (ISO 15444)",
|
| 189 |
+
".jp2 .j2k .jpc .jpf .jpx .j2c",
|
| 190 |
+
"JPEG2000Format",
|
| 191 |
+
),
|
| 192 |
+
("MCIDAS", "McIdas area file", "", "PillowFormat"),
|
| 193 |
+
("MIC", "Microsoft Image Composer", ".mic", "PillowFormat"),
|
| 194 |
+
# skipped in legacy pillow
|
| 195 |
+
# ("MPEG", "MPEG", ".mpg .mpeg", "PillowFormat"),
|
| 196 |
+
("MPO", "MPO (CIPA DC-007)", ".mpo", "PillowFormat"),
|
| 197 |
+
("MSP", "Windows Paint", ".msp", "PillowFormat"),
|
| 198 |
+
("PCD", "Kodak PhotoCD", ".pcd", "PillowFormat"),
|
| 199 |
+
("PCX", "Paintbrush", ".pcx", "PillowFormat"),
|
| 200 |
+
("PIXAR", "PIXAR raster image", ".pxr", "PillowFormat"),
|
| 201 |
+
("PNG", "Portable network graphics", ".png", "PNGFormat"),
|
| 202 |
+
("PPM", "Pbmplus image", ".pbm .pgm .ppm", "PillowFormat"),
|
| 203 |
+
("PSD", "Adobe Photoshop", ".psd", "PillowFormat"),
|
| 204 |
+
("SGI", "SGI Image File Format", ".bw .rgb .rgba .sgi", "PillowFormat"),
|
| 205 |
+
("SPIDER", "Spider 2D image", "", "PillowFormat"),
|
| 206 |
+
("SUN", "Sun Raster File", ".ras", "PillowFormat"),
|
| 207 |
+
("TGA", "Targa", ".tga", "PillowFormat"),
|
| 208 |
+
("TIFF", "Adobe TIFF", ".tif .tiff", "TIFFFormat"),
|
| 209 |
+
("WMF", "Windows Metafile", ".wmf .emf", "PillowFormat"),
|
| 210 |
+
("XBM", "X11 Bitmap", ".xbm", "PillowFormat"),
|
| 211 |
+
("XPM", "X11 Pixel Map", ".xpm", "PillowFormat"),
|
| 212 |
+
("XVTHUMB", "XV thumbnail image", "", "PillowFormat"),
|
| 213 |
+
]
|
| 214 |
+
for id, summary, ext, class_name in PILLOW_FORMATS:
|
| 215 |
+
config = PluginConfig(
|
| 216 |
+
name=id.upper() + "-PIL",
|
| 217 |
+
class_name=class_name,
|
| 218 |
+
module_name="imageio.plugins.pillow_legacy",
|
| 219 |
+
is_legacy=True,
|
| 220 |
+
install_name="pillow",
|
| 221 |
+
legacy_args={
|
| 222 |
+
"description": summary + " via Pillow",
|
| 223 |
+
"extensions": ext,
|
| 224 |
+
"modes": "iI" if class_name == "GIFFormat" else "i",
|
| 225 |
+
"plugin_id": id,
|
| 226 |
+
},
|
| 227 |
+
)
|
| 228 |
+
known_plugins[config.name] = config
|
| 229 |
+
|
| 230 |
+
known_plugins["FFMPEG"] = PluginConfig(
|
| 231 |
+
name="FFMPEG",
|
| 232 |
+
class_name="FfmpegFormat",
|
| 233 |
+
module_name="imageio.plugins.ffmpeg",
|
| 234 |
+
is_legacy=True,
|
| 235 |
+
install_name="ffmpeg",
|
| 236 |
+
legacy_args={
|
| 237 |
+
"description": "Many video formats and cameras (via ffmpeg)",
|
| 238 |
+
"extensions": ".mov .avi .mpg .mpeg .mp4 .mkv .webm .wmv .h264",
|
| 239 |
+
"modes": "I",
|
| 240 |
+
},
|
| 241 |
+
)
|
| 242 |
+
|
| 243 |
+
known_plugins["BSDF"] = PluginConfig(
|
| 244 |
+
name="BSDF",
|
| 245 |
+
class_name="BsdfFormat",
|
| 246 |
+
module_name="imageio.plugins.bsdf",
|
| 247 |
+
is_legacy=True,
|
| 248 |
+
install_name="bsdf",
|
| 249 |
+
legacy_args={
|
| 250 |
+
"description": "Format based on the Binary Structured Data Format",
|
| 251 |
+
"extensions": ".bsdf",
|
| 252 |
+
"modes": "iIvV",
|
| 253 |
+
},
|
| 254 |
+
)
|
| 255 |
+
|
| 256 |
+
known_plugins["DICOM"] = PluginConfig(
|
| 257 |
+
name="DICOM",
|
| 258 |
+
class_name="DicomFormat",
|
| 259 |
+
module_name="imageio.plugins.dicom",
|
| 260 |
+
is_legacy=True,
|
| 261 |
+
install_name="dicom",
|
| 262 |
+
legacy_args={
|
| 263 |
+
"description": "Digital Imaging and Communications in Medicine",
|
| 264 |
+
"extensions": ".dcm .ct .mri",
|
| 265 |
+
"modes": "iIvV",
|
| 266 |
+
},
|
| 267 |
+
)
|
| 268 |
+
|
| 269 |
+
known_plugins["FEI"] = PluginConfig(
|
| 270 |
+
name="FEI",
|
| 271 |
+
class_name="FEISEMFormat",
|
| 272 |
+
module_name="imageio.plugins.feisem",
|
| 273 |
+
is_legacy=True,
|
| 274 |
+
install_name="feisem",
|
| 275 |
+
legacy_args={
|
| 276 |
+
"description": "FEI-SEM TIFF format",
|
| 277 |
+
"extensions": [".tif", ".tiff"],
|
| 278 |
+
"modes": "iv",
|
| 279 |
+
},
|
| 280 |
+
)
|
| 281 |
+
|
| 282 |
+
known_plugins["FITS"] = PluginConfig(
|
| 283 |
+
name="FITS",
|
| 284 |
+
class_name="FitsFormat",
|
| 285 |
+
module_name="imageio.plugins.fits",
|
| 286 |
+
is_legacy=True,
|
| 287 |
+
install_name="fits",
|
| 288 |
+
legacy_args={
|
| 289 |
+
"description": "Flexible Image Transport System (FITS) format",
|
| 290 |
+
"extensions": ".fits .fit .fts .fz",
|
| 291 |
+
"modes": "iIvV",
|
| 292 |
+
},
|
| 293 |
+
)
|
| 294 |
+
|
| 295 |
+
known_plugins["GDAL"] = PluginConfig(
|
| 296 |
+
name="GDAL",
|
| 297 |
+
class_name="GdalFormat",
|
| 298 |
+
module_name="imageio.plugins.gdal",
|
| 299 |
+
is_legacy=True,
|
| 300 |
+
install_name="gdal",
|
| 301 |
+
legacy_args={
|
| 302 |
+
"description": "Geospatial Data Abstraction Library",
|
| 303 |
+
"extensions": ".tiff .tif .img .ecw .jpg .jpeg",
|
| 304 |
+
"modes": "iIvV",
|
| 305 |
+
},
|
| 306 |
+
)
|
| 307 |
+
|
| 308 |
+
known_plugins["ITK"] = PluginConfig(
|
| 309 |
+
name="ITK",
|
| 310 |
+
class_name="ItkFormat",
|
| 311 |
+
module_name="imageio.plugins.simpleitk",
|
| 312 |
+
is_legacy=True,
|
| 313 |
+
install_name="simpleitk",
|
| 314 |
+
legacy_args={
|
| 315 |
+
"description": "Insight Segmentation and Registration Toolkit (ITK) format",
|
| 316 |
+
"extensions": " ".join(
|
| 317 |
+
(
|
| 318 |
+
".gipl",
|
| 319 |
+
".ipl",
|
| 320 |
+
".mha",
|
| 321 |
+
".mhd",
|
| 322 |
+
".nhdr",
|
| 323 |
+
".nia",
|
| 324 |
+
".hdr",
|
| 325 |
+
".nrrd",
|
| 326 |
+
".nii",
|
| 327 |
+
".nii.gz",
|
| 328 |
+
".img",
|
| 329 |
+
".img.gz",
|
| 330 |
+
".vtk",
|
| 331 |
+
".hdf5",
|
| 332 |
+
".lsm",
|
| 333 |
+
".mnc",
|
| 334 |
+
".mnc2",
|
| 335 |
+
".mgh",
|
| 336 |
+
".mnc",
|
| 337 |
+
".pic",
|
| 338 |
+
".bmp",
|
| 339 |
+
".jpeg",
|
| 340 |
+
".jpg",
|
| 341 |
+
".png",
|
| 342 |
+
".tiff",
|
| 343 |
+
".tif",
|
| 344 |
+
".dicom",
|
| 345 |
+
".dcm",
|
| 346 |
+
".gdcm",
|
| 347 |
+
)
|
| 348 |
+
),
|
| 349 |
+
"modes": "iIvV",
|
| 350 |
+
},
|
| 351 |
+
)
|
| 352 |
+
|
| 353 |
+
known_plugins["NPZ"] = PluginConfig(
|
| 354 |
+
name="NPZ",
|
| 355 |
+
class_name="NpzFormat",
|
| 356 |
+
module_name="imageio.plugins.npz",
|
| 357 |
+
is_legacy=True,
|
| 358 |
+
install_name="numpy",
|
| 359 |
+
legacy_args={
|
| 360 |
+
"description": "Numpy's compressed array format",
|
| 361 |
+
"extensions": ".npz",
|
| 362 |
+
"modes": "iIvV",
|
| 363 |
+
},
|
| 364 |
+
)
|
| 365 |
+
|
| 366 |
+
known_plugins["SWF"] = PluginConfig(
|
| 367 |
+
name="SWF",
|
| 368 |
+
class_name="SWFFormat",
|
| 369 |
+
module_name="imageio.plugins.swf",
|
| 370 |
+
is_legacy=True,
|
| 371 |
+
install_name="swf",
|
| 372 |
+
legacy_args={
|
| 373 |
+
"description": "Shockwave flash",
|
| 374 |
+
"extensions": ".swf",
|
| 375 |
+
"modes": "I",
|
| 376 |
+
},
|
| 377 |
+
)
|
| 378 |
+
|
| 379 |
+
known_plugins["SCREENGRAB"] = PluginConfig(
|
| 380 |
+
name="SCREENGRAB",
|
| 381 |
+
class_name="ScreenGrabFormat",
|
| 382 |
+
module_name="imageio.plugins.grab",
|
| 383 |
+
is_legacy=True,
|
| 384 |
+
install_name="pillow",
|
| 385 |
+
legacy_args={
|
| 386 |
+
"description": "Grab screenshots (Windows and OS X only)",
|
| 387 |
+
"extensions": [],
|
| 388 |
+
"modes": "i",
|
| 389 |
+
},
|
| 390 |
+
)
|
| 391 |
+
|
| 392 |
+
known_plugins["CLIPBOARDGRAB"] = PluginConfig(
|
| 393 |
+
name="CLIPBOARDGRAB",
|
| 394 |
+
class_name="ClipboardGrabFormat",
|
| 395 |
+
module_name="imageio.plugins.grab",
|
| 396 |
+
is_legacy=True,
|
| 397 |
+
install_name="pillow",
|
| 398 |
+
legacy_args={
|
| 399 |
+
"description": "Grab from clipboard (Windows only)",
|
| 400 |
+
"extensions": [],
|
| 401 |
+
"modes": "i",
|
| 402 |
+
},
|
| 403 |
+
)
|
| 404 |
+
|
| 405 |
+
# LYTRO plugin (legacy)
|
| 406 |
+
lytro_formats = [
|
| 407 |
+
("lytro-lfr", "Lytro Illum lfr image file", ".lfr", "i", "LytroLfrFormat"),
|
| 408 |
+
(
|
| 409 |
+
"lytro-illum-raw",
|
| 410 |
+
"Lytro Illum raw image file",
|
| 411 |
+
".raw",
|
| 412 |
+
"i",
|
| 413 |
+
"LytroIllumRawFormat",
|
| 414 |
+
),
|
| 415 |
+
("lytro-lfp", "Lytro F01 lfp image file", ".lfp", "i", "LytroLfpFormat"),
|
| 416 |
+
("lytro-f01-raw", "Lytro F01 raw image file", ".raw", "i", "LytroF01RawFormat"),
|
| 417 |
+
]
|
| 418 |
+
for name, des, ext, mode, class_name in lytro_formats:
|
| 419 |
+
config = PluginConfig(
|
| 420 |
+
name=name.upper(),
|
| 421 |
+
class_name=class_name,
|
| 422 |
+
module_name="imageio.plugins.lytro",
|
| 423 |
+
is_legacy=True,
|
| 424 |
+
install_name="lytro",
|
| 425 |
+
legacy_args={
|
| 426 |
+
"description": des,
|
| 427 |
+
"extensions": ext,
|
| 428 |
+
"modes": mode,
|
| 429 |
+
},
|
| 430 |
+
)
|
| 431 |
+
known_plugins[config.name] = config
|
| 432 |
+
|
| 433 |
+
# FreeImage plugin (legacy)
|
| 434 |
+
FREEIMAGE_FORMATS = [
|
| 435 |
+
(
|
| 436 |
+
"BMP",
|
| 437 |
+
0,
|
| 438 |
+
"Windows or OS/2 Bitmap",
|
| 439 |
+
".bmp",
|
| 440 |
+
"i",
|
| 441 |
+
"FreeimageBmpFormat",
|
| 442 |
+
"imageio.plugins.freeimage",
|
| 443 |
+
),
|
| 444 |
+
(
|
| 445 |
+
"CUT",
|
| 446 |
+
21,
|
| 447 |
+
"Dr. Halo",
|
| 448 |
+
".cut",
|
| 449 |
+
"i",
|
| 450 |
+
"FreeimageFormat",
|
| 451 |
+
"imageio.plugins.freeimage",
|
| 452 |
+
),
|
| 453 |
+
(
|
| 454 |
+
"DDS",
|
| 455 |
+
24,
|
| 456 |
+
"DirectX Surface",
|
| 457 |
+
".dds",
|
| 458 |
+
"i",
|
| 459 |
+
"FreeimageFormat",
|
| 460 |
+
"imageio.plugins.freeimage",
|
| 461 |
+
),
|
| 462 |
+
(
|
| 463 |
+
"EXR",
|
| 464 |
+
29,
|
| 465 |
+
"ILM OpenEXR",
|
| 466 |
+
".exr",
|
| 467 |
+
"i",
|
| 468 |
+
"FreeimageFormat",
|
| 469 |
+
"imageio.plugins.freeimage",
|
| 470 |
+
),
|
| 471 |
+
(
|
| 472 |
+
"G3",
|
| 473 |
+
27,
|
| 474 |
+
"Raw fax format CCITT G.3",
|
| 475 |
+
".g3",
|
| 476 |
+
"i",
|
| 477 |
+
"FreeimageFormat",
|
| 478 |
+
"imageio.plugins.freeimage",
|
| 479 |
+
),
|
| 480 |
+
(
|
| 481 |
+
"GIF",
|
| 482 |
+
25,
|
| 483 |
+
"Static and animated gif (FreeImage)",
|
| 484 |
+
".gif",
|
| 485 |
+
"iI",
|
| 486 |
+
"GifFormat",
|
| 487 |
+
"imageio.plugins.freeimagemulti",
|
| 488 |
+
),
|
| 489 |
+
(
|
| 490 |
+
"HDR",
|
| 491 |
+
26,
|
| 492 |
+
"High Dynamic Range Image",
|
| 493 |
+
".hdr",
|
| 494 |
+
"i",
|
| 495 |
+
"FreeimageFormat",
|
| 496 |
+
"imageio.plugins.freeimage",
|
| 497 |
+
),
|
| 498 |
+
(
|
| 499 |
+
"ICO",
|
| 500 |
+
1,
|
| 501 |
+
"Windows Icon",
|
| 502 |
+
".ico",
|
| 503 |
+
"iI",
|
| 504 |
+
"IcoFormat",
|
| 505 |
+
"imageio.plugins.freeimagemulti",
|
| 506 |
+
),
|
| 507 |
+
(
|
| 508 |
+
"IFF",
|
| 509 |
+
5,
|
| 510 |
+
"IFF Interleaved Bitmap",
|
| 511 |
+
".iff .lbm",
|
| 512 |
+
"i",
|
| 513 |
+
"FreeimageFormat",
|
| 514 |
+
"imageio.plugins.freeimage",
|
| 515 |
+
),
|
| 516 |
+
(
|
| 517 |
+
"J2K",
|
| 518 |
+
30,
|
| 519 |
+
"JPEG-2000 codestream",
|
| 520 |
+
".j2k .j2c",
|
| 521 |
+
"i",
|
| 522 |
+
"FreeimageFormat",
|
| 523 |
+
"imageio.plugins.freeimage",
|
| 524 |
+
),
|
| 525 |
+
(
|
| 526 |
+
"JNG",
|
| 527 |
+
3,
|
| 528 |
+
"JPEG Network Graphics",
|
| 529 |
+
".jng",
|
| 530 |
+
"i",
|
| 531 |
+
"FreeimageFormat",
|
| 532 |
+
"imageio.plugins.freeimage",
|
| 533 |
+
),
|
| 534 |
+
(
|
| 535 |
+
"JP2",
|
| 536 |
+
31,
|
| 537 |
+
"JPEG-2000 File Format",
|
| 538 |
+
".jp2",
|
| 539 |
+
"i",
|
| 540 |
+
"FreeimageFormat",
|
| 541 |
+
"imageio.plugins.freeimage",
|
| 542 |
+
),
|
| 543 |
+
(
|
| 544 |
+
"JPEG",
|
| 545 |
+
2,
|
| 546 |
+
"JPEG - JFIF Compliant",
|
| 547 |
+
".jpg .jif .jpeg .jpe",
|
| 548 |
+
"i",
|
| 549 |
+
"FreeimageJpegFormat",
|
| 550 |
+
"imageio.plugins.freeimage",
|
| 551 |
+
),
|
| 552 |
+
(
|
| 553 |
+
"JPEG-XR",
|
| 554 |
+
36,
|
| 555 |
+
"JPEG XR image format",
|
| 556 |
+
".jxr .wdp .hdp",
|
| 557 |
+
"i",
|
| 558 |
+
"FreeimageFormat",
|
| 559 |
+
"imageio.plugins.freeimage",
|
| 560 |
+
),
|
| 561 |
+
(
|
| 562 |
+
"KOALA",
|
| 563 |
+
4,
|
| 564 |
+
"C64 Koala Graphics",
|
| 565 |
+
".koa",
|
| 566 |
+
"i",
|
| 567 |
+
"FreeimageFormat",
|
| 568 |
+
"imageio.plugins.freeimage",
|
| 569 |
+
),
|
| 570 |
+
# not registered in legacy pillow
|
| 571 |
+
# ("MNG", 6, "Multiple-image Network Graphics", ".mng", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
|
| 572 |
+
(
|
| 573 |
+
"PBM",
|
| 574 |
+
7,
|
| 575 |
+
"Portable Bitmap (ASCII)",
|
| 576 |
+
".pbm",
|
| 577 |
+
"i",
|
| 578 |
+
"FreeimageFormat",
|
| 579 |
+
"imageio.plugins.freeimage",
|
| 580 |
+
),
|
| 581 |
+
(
|
| 582 |
+
"PBMRAW",
|
| 583 |
+
8,
|
| 584 |
+
"Portable Bitmap (RAW)",
|
| 585 |
+
".pbm",
|
| 586 |
+
"i",
|
| 587 |
+
"FreeimageFormat",
|
| 588 |
+
"imageio.plugins.freeimage",
|
| 589 |
+
),
|
| 590 |
+
(
|
| 591 |
+
"PCD",
|
| 592 |
+
9,
|
| 593 |
+
"Kodak PhotoCD",
|
| 594 |
+
".pcd",
|
| 595 |
+
"i",
|
| 596 |
+
"FreeimageFormat",
|
| 597 |
+
"imageio.plugins.freeimage",
|
| 598 |
+
),
|
| 599 |
+
(
|
| 600 |
+
"PCX",
|
| 601 |
+
10,
|
| 602 |
+
"Zsoft Paintbrush",
|
| 603 |
+
".pcx",
|
| 604 |
+
"i",
|
| 605 |
+
"FreeimageFormat",
|
| 606 |
+
"imageio.plugins.freeimage",
|
| 607 |
+
),
|
| 608 |
+
(
|
| 609 |
+
"PFM",
|
| 610 |
+
32,
|
| 611 |
+
"Portable floatmap",
|
| 612 |
+
".pfm",
|
| 613 |
+
"i",
|
| 614 |
+
"FreeimageFormat",
|
| 615 |
+
"imageio.plugins.freeimage",
|
| 616 |
+
),
|
| 617 |
+
(
|
| 618 |
+
"PGM",
|
| 619 |
+
11,
|
| 620 |
+
"Portable Greymap (ASCII)",
|
| 621 |
+
".pgm",
|
| 622 |
+
"i",
|
| 623 |
+
"FreeimageFormat",
|
| 624 |
+
"imageio.plugins.freeimage",
|
| 625 |
+
),
|
| 626 |
+
(
|
| 627 |
+
"PGMRAW",
|
| 628 |
+
12,
|
| 629 |
+
"Portable Greymap (RAW)",
|
| 630 |
+
".pgm",
|
| 631 |
+
"i",
|
| 632 |
+
"FreeimageFormat",
|
| 633 |
+
"imageio.plugins.freeimage",
|
| 634 |
+
),
|
| 635 |
+
(
|
| 636 |
+
"PICT",
|
| 637 |
+
33,
|
| 638 |
+
"Macintosh PICT",
|
| 639 |
+
".pct .pict .pic",
|
| 640 |
+
"i",
|
| 641 |
+
"FreeimageFormat",
|
| 642 |
+
"imageio.plugins.freeimage",
|
| 643 |
+
),
|
| 644 |
+
(
|
| 645 |
+
"PNG",
|
| 646 |
+
13,
|
| 647 |
+
"Portable Network Graphics",
|
| 648 |
+
".png",
|
| 649 |
+
"i",
|
| 650 |
+
"FreeimagePngFormat",
|
| 651 |
+
"imageio.plugins.freeimage",
|
| 652 |
+
),
|
| 653 |
+
(
|
| 654 |
+
"PPM",
|
| 655 |
+
14,
|
| 656 |
+
"Portable Pixelmap (ASCII)",
|
| 657 |
+
".ppm",
|
| 658 |
+
"i",
|
| 659 |
+
"FreeimagePnmFormat",
|
| 660 |
+
"imageio.plugins.freeimage",
|
| 661 |
+
),
|
| 662 |
+
(
|
| 663 |
+
"PPMRAW",
|
| 664 |
+
15,
|
| 665 |
+
"Portable Pixelmap (RAW)",
|
| 666 |
+
".ppm",
|
| 667 |
+
"i",
|
| 668 |
+
"FreeimagePnmFormat",
|
| 669 |
+
"imageio.plugins.freeimage",
|
| 670 |
+
),
|
| 671 |
+
(
|
| 672 |
+
"PSD",
|
| 673 |
+
20,
|
| 674 |
+
"Adobe Photoshop",
|
| 675 |
+
".psd",
|
| 676 |
+
"i",
|
| 677 |
+
"FreeimageFormat",
|
| 678 |
+
"imageio.plugins.freeimage",
|
| 679 |
+
),
|
| 680 |
+
(
|
| 681 |
+
"RAS",
|
| 682 |
+
16,
|
| 683 |
+
"Sun Raster Image",
|
| 684 |
+
".ras",
|
| 685 |
+
"i",
|
| 686 |
+
"FreeimageFormat",
|
| 687 |
+
"imageio.plugins.freeimage",
|
| 688 |
+
),
|
| 689 |
+
(
|
| 690 |
+
"RAW",
|
| 691 |
+
34,
|
| 692 |
+
"RAW camera image",
|
| 693 |
+
".3fr .arw .bay .bmq .cap .cine .cr2 .crw .cs1 .dc2 "
|
| 694 |
+
".dcr .drf .dsc .dng .erf .fff .ia .iiq .k25 .kc2 .kdc .mdc .mef .mos .mrw .nef .nrw .orf "
|
| 695 |
+
".pef .ptx .pxn .qtk .raf .raw .rdc .rw2 .rwl .rwz .sr2 .srf .srw .sti",
|
| 696 |
+
"i",
|
| 697 |
+
"FreeimageFormat",
|
| 698 |
+
"imageio.plugins.freeimage",
|
| 699 |
+
),
|
| 700 |
+
(
|
| 701 |
+
"SGI",
|
| 702 |
+
28,
|
| 703 |
+
"SGI Image Format",
|
| 704 |
+
".sgi .rgb .rgba .bw",
|
| 705 |
+
"i",
|
| 706 |
+
"FreeimageFormat",
|
| 707 |
+
"imageio.plugins.freeimage",
|
| 708 |
+
),
|
| 709 |
+
(
|
| 710 |
+
"TARGA",
|
| 711 |
+
17,
|
| 712 |
+
"Truevision Targa",
|
| 713 |
+
".tga .targa",
|
| 714 |
+
"i",
|
| 715 |
+
"FreeimageFormat",
|
| 716 |
+
"imageio.plugins.freeimage",
|
| 717 |
+
),
|
| 718 |
+
(
|
| 719 |
+
"TIFF",
|
| 720 |
+
18,
|
| 721 |
+
"Tagged Image File Format",
|
| 722 |
+
".tif .tiff",
|
| 723 |
+
"i",
|
| 724 |
+
"FreeimageFormat",
|
| 725 |
+
"imageio.plugins.freeimage",
|
| 726 |
+
),
|
| 727 |
+
(
|
| 728 |
+
"WBMP",
|
| 729 |
+
19,
|
| 730 |
+
"Wireless Bitmap",
|
| 731 |
+
".wap .wbmp .wbm",
|
| 732 |
+
"i",
|
| 733 |
+
"FreeimageFormat",
|
| 734 |
+
"imageio.plugins.freeimage",
|
| 735 |
+
),
|
| 736 |
+
(
|
| 737 |
+
"WebP",
|
| 738 |
+
35,
|
| 739 |
+
"Google WebP image format",
|
| 740 |
+
".webp",
|
| 741 |
+
"i",
|
| 742 |
+
"FreeimageFormat",
|
| 743 |
+
"imageio.plugins.freeimage",
|
| 744 |
+
),
|
| 745 |
+
(
|
| 746 |
+
"XBM",
|
| 747 |
+
22,
|
| 748 |
+
"X11 Bitmap Format",
|
| 749 |
+
".xbm",
|
| 750 |
+
"i",
|
| 751 |
+
"FreeimageFormat",
|
| 752 |
+
"imageio.plugins.freeimage",
|
| 753 |
+
),
|
| 754 |
+
(
|
| 755 |
+
"XPM",
|
| 756 |
+
23,
|
| 757 |
+
"X11 Pixmap Format",
|
| 758 |
+
".xpm",
|
| 759 |
+
"i",
|
| 760 |
+
"FreeimageFormat",
|
| 761 |
+
"imageio.plugins.freeimage",
|
| 762 |
+
),
|
| 763 |
+
]
|
| 764 |
+
for name, i, des, ext, mode, class_name, module_name in FREEIMAGE_FORMATS:
|
| 765 |
+
config = PluginConfig(
|
| 766 |
+
name=name.upper() + "-FI",
|
| 767 |
+
class_name=class_name,
|
| 768 |
+
module_name=module_name,
|
| 769 |
+
is_legacy=True,
|
| 770 |
+
install_name="freeimage",
|
| 771 |
+
legacy_args={
|
| 772 |
+
"description": des,
|
| 773 |
+
"extensions": ext,
|
| 774 |
+
"modes": mode,
|
| 775 |
+
"fif": i,
|
| 776 |
+
},
|
| 777 |
+
)
|
| 778 |
+
known_plugins[config.name] = config
|
| 779 |
+
|
| 780 |
+
# exists for backwards compatibility with FormatManager
|
| 781 |
+
# delete in V3
|
| 782 |
+
_original_order = [x for x, config in known_plugins.items() if config.is_legacy]
|
parrot/lib/python3.10/site-packages/imageio/config/plugins.pyi
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Dict, Optional
|
| 2 |
+
from ..core.v3_plugin_api import PluginV3
|
| 3 |
+
|
| 4 |
+
class PluginConfig:
|
| 5 |
+
name: str
|
| 6 |
+
class_name: str
|
| 7 |
+
module_name: str
|
| 8 |
+
is_legacy: bool
|
| 9 |
+
package_name: Optional[str] = None
|
| 10 |
+
install_name: Optional[str] = None
|
| 11 |
+
legacy_args: Optional[dict] = None
|
| 12 |
+
@property
|
| 13 |
+
def format(self) -> Any: ...
|
| 14 |
+
@property
|
| 15 |
+
def plugin_class(self) -> PluginV3: ...
|
| 16 |
+
def __init__(
|
| 17 |
+
self,
|
| 18 |
+
name: str,
|
| 19 |
+
class_name: str,
|
| 20 |
+
module_name: str,
|
| 21 |
+
*,
|
| 22 |
+
is_legacy: bool = False,
|
| 23 |
+
package_name: str = None,
|
| 24 |
+
install_name: str = None,
|
| 25 |
+
legacy_args: dict = None,
|
| 26 |
+
) -> None: ...
|
| 27 |
+
|
| 28 |
+
known_plugins: Dict[str, PluginConfig]
|
parrot/lib/python3.10/site-packages/imageio/freeze.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Helper functions for freezing imageio.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def get_includes():
|
| 7 |
+
return ["email", "urllib.request", "numpy", "zipfile", "io"]
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def get_excludes():
|
| 11 |
+
return []
|
parrot/lib/python3.10/site-packages/imageio/plugins/__init__.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# imageio is distributed under the terms of the (new) BSD License.
|
| 3 |
+
|
| 4 |
+
# flake8: noqa
|
| 5 |
+
|
| 6 |
+
"""
|
| 7 |
+
Here you can find documentation on how to write your own plugin to allow
|
| 8 |
+
ImageIO to access a new backend. Plugins are quite object oriented, and
|
| 9 |
+
the relevant classes and their interaction are documented here:
|
| 10 |
+
|
| 11 |
+
.. currentmodule:: imageio
|
| 12 |
+
|
| 13 |
+
.. autosummary::
|
| 14 |
+
:toctree: ../_autosummary
|
| 15 |
+
:template: better_class.rst
|
| 16 |
+
|
| 17 |
+
imageio.core.Format
|
| 18 |
+
imageio.core.Request
|
| 19 |
+
|
| 20 |
+
.. note::
|
| 21 |
+
You can always check existing plugins if you want to see examples.
|
| 22 |
+
|
| 23 |
+
What methods to implement
|
| 24 |
+
-------------------------
|
| 25 |
+
|
| 26 |
+
To implement a new plugin, create a new class that inherits from
|
| 27 |
+
:class:`imageio.core.Format`. and implement the following functions:
|
| 28 |
+
|
| 29 |
+
.. autosummary::
|
| 30 |
+
:toctree: ../_autosummary
|
| 31 |
+
|
| 32 |
+
imageio.core.Format.__init__
|
| 33 |
+
imageio.core.Format._can_read
|
| 34 |
+
imageio.core.Format._can_write
|
| 35 |
+
|
| 36 |
+
Further, each format contains up to two nested classes; one for reading and
|
| 37 |
+
one for writing. To support reading and/or writing, the respective classes
|
| 38 |
+
need to be defined.
|
| 39 |
+
|
| 40 |
+
For reading, create a nested class that inherits from
|
| 41 |
+
``imageio.core.Format.Reader`` and that implements the following functions:
|
| 42 |
+
|
| 43 |
+
* Implement ``_open(**kwargs)`` to initialize the reader. Deal with the
|
| 44 |
+
user-provided keyword arguments here.
|
| 45 |
+
* Implement ``_close()`` to clean up.
|
| 46 |
+
* Implement ``_get_length()`` to provide a suitable length based on what
|
| 47 |
+
the user expects. Can be ``inf`` for streaming data.
|
| 48 |
+
* Implement ``_get_data(index)`` to return an array and a meta-data dict.
|
| 49 |
+
* Implement ``_get_meta_data(index)`` to return a meta-data dict. If index
|
| 50 |
+
is None, it should return the 'global' meta-data.
|
| 51 |
+
|
| 52 |
+
For writing, create a nested class that inherits from
|
| 53 |
+
``imageio.core.Format.Writer`` and implement the following functions:
|
| 54 |
+
|
| 55 |
+
* Implement ``_open(**kwargs)`` to initialize the writer. Deal with the
|
| 56 |
+
user-provided keyword arguments here.
|
| 57 |
+
* Implement ``_close()`` to clean up.
|
| 58 |
+
* Implement ``_append_data(im, meta)`` to add data (and meta-data).
|
| 59 |
+
* Implement ``_set_meta_data(meta)`` to set the global meta-data.
|
| 60 |
+
|
| 61 |
+
"""
|
| 62 |
+
|
| 63 |
+
import importlib
|
| 64 |
+
import os
|
| 65 |
+
import warnings
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
# v2 imports remove in v3
|
| 69 |
+
from .. import formats
|
| 70 |
+
|
| 71 |
+
# v2 allows formatting plugins by environment variable
|
| 72 |
+
# this is done here.
|
| 73 |
+
env_plugin_order = os.getenv("IMAGEIO_FORMAT_ORDER", None)
|
| 74 |
+
if env_plugin_order is not None: # pragma: no cover
|
| 75 |
+
warnings.warn(
|
| 76 |
+
"Setting plugin priority through an environment variable is"
|
| 77 |
+
" deprecated and will be removed in ImageIO v3. There is no"
|
| 78 |
+
" replacement planned for this feature. If you have an"
|
| 79 |
+
" active use-case for it, please reach out to us on GitHub.",
|
| 80 |
+
DeprecationWarning,
|
| 81 |
+
)
|
| 82 |
+
|
| 83 |
+
formats.sort(*os.getenv("IMAGEIO_FORMAT_ORDER", "").split(","))
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
# this class replaces plugin module. For details
|
| 87 |
+
# see https://stackoverflow.com/questions/2447353/getattr-on-a-module
|
| 88 |
+
def __getattr__(name):
|
| 89 |
+
"""Lazy-Import Plugins
|
| 90 |
+
|
| 91 |
+
This function dynamically loads plugins into the imageio.plugin
|
| 92 |
+
namespace upon first access. For example, the following snippet will
|
| 93 |
+
delay importing freeimage until the second line:
|
| 94 |
+
|
| 95 |
+
>>> import imageio
|
| 96 |
+
>>> imageio.plugins.freeimage.download()
|
| 97 |
+
|
| 98 |
+
"""
|
| 99 |
+
|
| 100 |
+
try:
|
| 101 |
+
return importlib.import_module(f"imageio.plugins.{name}")
|
| 102 |
+
except ImportError:
|
| 103 |
+
raise AttributeError(f"module '{__name__}' has no attribute '{name}'") from None
|
parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/_bsdf.cpython-310.pyc
ADDED
|
Binary file (25.2 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/_freeimage.cpython-310.pyc
ADDED
|
Binary file (28.9 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/example.cpython-310.pyc
ADDED
|
Binary file (3.42 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/freeimage.cpython-310.pyc
ADDED
|
Binary file (14 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/freeimagemulti.cpython-310.pyc
ADDED
|
Binary file (10.6 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/gdal.cpython-310.pyc
ADDED
|
Binary file (2.36 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/grab.cpython-310.pyc
ADDED
|
Binary file (3.64 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/lytro.cpython-310.pyc
ADDED
|
Binary file (15.6 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/opencv.cpython-310.pyc
ADDED
|
Binary file (11 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow.cpython-310.pyc
ADDED
|
Binary file (18.5 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow_legacy.cpython-310.pyc
ADDED
|
Binary file (23.2 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillowmulti.cpython-310.pyc
ADDED
|
Binary file (9.27 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/rawpy.cpython-310.pyc
ADDED
|
Binary file (5.14 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/spe.cpython-310.pyc
ADDED
|
Binary file (28.3 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/swf.cpython-310.pyc
ADDED
|
Binary file (8.68 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/tifffile.cpython-310.pyc
ADDED
|
Binary file (18.7 kB). View file
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/_bsdf.py
ADDED
|
@@ -0,0 +1,915 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
# This file is distributed under the terms of the 2-clause BSD License.
|
| 3 |
+
# Copyright (c) 2017-2018, Almar Klein
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
Python implementation of the Binary Structured Data Format (BSDF).
|
| 7 |
+
|
| 8 |
+
BSDF is a binary format for serializing structured (scientific) data.
|
| 9 |
+
See http://bsdf.io for more information.
|
| 10 |
+
|
| 11 |
+
This is the reference implementation, which is relatively relatively
|
| 12 |
+
sophisticated, providing e.g. lazy loading of blobs and streamed
|
| 13 |
+
reading/writing. A simpler Python implementation is available as
|
| 14 |
+
``bsdf_lite.py``.
|
| 15 |
+
|
| 16 |
+
This module has no dependencies and works on Python 2.7 and 3.4+.
|
| 17 |
+
|
| 18 |
+
Note: on Legacy Python (Python 2.7), non-Unicode strings are encoded as bytes.
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
# todo: in 2020, remove six stuff, __future__ and _isidentifier
|
| 22 |
+
# todo: in 2020, remove 'utf-8' args to encode/decode; it's faster
|
| 23 |
+
|
| 24 |
+
from __future__ import absolute_import, division, print_function
|
| 25 |
+
|
| 26 |
+
import bz2
|
| 27 |
+
import hashlib
|
| 28 |
+
import logging
|
| 29 |
+
import os
|
| 30 |
+
import re
|
| 31 |
+
import struct
|
| 32 |
+
import sys
|
| 33 |
+
import types
|
| 34 |
+
import zlib
|
| 35 |
+
from io import BytesIO
|
| 36 |
+
|
| 37 |
+
logger = logging.getLogger(__name__)
|
| 38 |
+
|
| 39 |
+
# Notes on versioning: the major and minor numbers correspond to the
|
| 40 |
+
# BSDF format version. The major number if increased when backward
|
| 41 |
+
# incompatible changes are introduced. An implementation must raise an
|
| 42 |
+
# exception when the file being read has a higher major version. The
|
| 43 |
+
# minor number is increased when new backward compatible features are
|
| 44 |
+
# introduced. An implementation must display a warning when the file
|
| 45 |
+
# being read has a higher minor version. The patch version is increased
|
| 46 |
+
# for subsequent releases of the implementation.
|
| 47 |
+
VERSION = 2, 1, 2
|
| 48 |
+
__version__ = ".".join(str(i) for i in VERSION)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
# %% The encoder and decoder implementation
|
| 52 |
+
|
| 53 |
+
# From six.py
# Py2/py3 compatibility aliases (see the module-level todo about removing
# these in 2020). On PY3 they all collapse to the builtin types.
PY3 = sys.version_info[0] >= 3
if PY3:
    text_type = str
    string_types = str
    unicode_types = str
    integer_types = int
    classtypes = type
else:  # pragma: no cover
    logging.basicConfig()  # avoid "no handlers found" error
    text_type = unicode  # noqa
    string_types = basestring  # noqa
    unicode_types = unicode  # noqa
    integer_types = (int, long)  # noqa
    classtypes = type, types.ClassType

# Shorthands
spack = struct.pack
strunpack = struct.unpack
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def lencode(x):
    """Encode an unsigned integer into a variable sized blob of bytes."""
    # Values up to 250 fit in a single byte; anything larger is written as
    # the marker byte 253 followed by a little-endian uint64. Markers for
    # 16/32 bit forms (251/252) are reserved but unused, since saving a few
    # bytes on collections with over 250 elements is marginal anyway.
    if x > 250:
        return struct.pack("<BQ", 253, x)
    return struct.pack("<B", x)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
# Include len decoder for completeness; we've inlined it for performance.
|
| 89 |
+
def lendecode(f):
    """Decode an unsigned integer from a file."""
    # Single byte, unless it is the 253 marker, in which case the real
    # value follows as a little-endian uint64.
    (small,) = struct.unpack("<B", f.read(1))
    if small != 253:
        return small
    return struct.unpack("<Q", f.read(8))[0]  # noqa
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def encode_type_id(b, ext_id):
    """Encode the type identifier, with or without extension id."""
    if ext_id is None:
        # Plain (lowercase) type id, no extension name attached.
        return b  # noqa
    # An uppercase id signals that a length-prefixed extension name follows.
    name_bytes = ext_id.encode("UTF-8")
    return b.upper() + lencode(len(name_bytes)) + name_bytes  # noqa
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def _isidentifier(s):  # pragma: no cover
    """Use of str.isidentifier() for Legacy Python, but slower."""
    # http://stackoverflow.com/questions/2544972/
    # An identifier is one or more word chars not starting with a digit.
    # Note: callers only rely on the truthiness of the result (the chained
    # `and` may yield False, None, or a bool).
    return (
        isinstance(s, string_types)
        and re.match(r"^\w+$", s, re.UNICODE)
        and re.match(r"^[0-9]", s) is None
    )
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
class BsdfSerializer(object):
    """Instances of this class represent a BSDF encoder/decoder.

    It acts as a placeholder for a set of extensions and encoding/decoding
    options. Use this to predefine extensions and options for high
    performance encoding/decoding. For general use, see the functions
    `save()`, `encode()`, `load()`, and `decode()`.

    This implementation of BSDF supports streaming lists (keep adding
    to a list after writing the main file), lazy loading of blobs, and
    in-place editing of blobs (for streams opened with a+).

    Options for encoding:

    * compression (int or str): ``0`` or "no" for no compression (default),
      ``1`` or "zlib" for Zlib compression (same as zip files and PNG), and
      ``2`` or "bz2" for Bz2 compression (more compact but slower writing).
      Note that some BSDF implementations (e.g. JavaScript) may not support
      compression.
    * use_checksum (bool): whether to include a checksum with binary blobs.
    * float64 (bool): Whether to write floats as 64 bit (default) or 32 bit.

    Options for decoding:

    * load_streaming (bool): if True, and the final object in the structure was
      a stream, will make it available as a stream in the decoded object.
    * lazy_blob (bool): if True, bytes are represented as Blob objects that can
      be used to lazily access the data, and also overwrite the data if the
      file is open in a+ mode.
    """

    def __init__(self, extensions=None, **options):
        self._extensions = {}  # name -> extension
        self._extensions_by_cls = {}  # cls -> (name, extension.encode)
        if extensions is None:
            extensions = standard_extensions
        for extension in extensions:
            self.add_extension(extension)
        self._parse_options(**options)

    def _parse_options(
        self,
        compression=0,
        use_checksum=False,
        float64=True,
        load_streaming=False,
        lazy_blob=False,
    ):
        # Validate compression; accept both the int codes and their
        # string aliases ("no"/"zlib"/"bz2").
        if isinstance(compression, string_types):
            m = {"no": 0, "zlib": 1, "bz2": 2}
            compression = m.get(compression.lower(), compression)
        if compression not in (0, 1, 2):
            raise TypeError("Compression must be 0, 1, 2, " '"no", "zlib", or "bz2"')
        self._compression = compression

        # Other encoding args
        self._use_checksum = bool(use_checksum)
        self._float64 = bool(float64)

        # Decoding args
        self._load_streaming = bool(load_streaming)
        self._lazy_blob = bool(lazy_blob)

    def add_extension(self, extension_class):
        """Add an extension to this serializer instance, which must be
        a subclass of Extension. Can be used as a decorator.
        """
        # Check class
        if not (
            isinstance(extension_class, type) and issubclass(extension_class, Extension)
        ):
            raise TypeError("add_extension() expects a Extension class.")
        extension = extension_class()

        # Get name (used as the on-disk extension identifier; its encoded
        # length must fit the single-byte size prefix, hence the 250 cap)
        name = extension.name
        if not isinstance(name, str):
            raise TypeError("Extension name must be str.")
        if len(name) == 0 or len(name) > 250:
            raise NameError(
                "Extension names must be nonempty and shorter " "than 251 chars."
            )
        if name in self._extensions:
            logger.warning(
                'BSDF warning: overwriting extension "%s", '
                "consider removing first" % name
            )

        # Get classes; extension.cls may be a single class, a tuple/list
        # of classes, or falsy (match() is then the only dispatch path).
        cls = extension.cls
        if not cls:
            clss = []
        elif isinstance(cls, (tuple, list)):
            clss = cls
        else:
            clss = [cls]
        for cls in clss:
            if not isinstance(cls, classtypes):
                raise TypeError("Extension classes must be types.")

        # Store
        for cls in clss:
            self._extensions_by_cls[cls] = name, extension.encode
        self._extensions[name] = extension
        return extension_class

    def remove_extension(self, name):
        """Remove a converted by its unique name."""
        if not isinstance(name, str):
            raise TypeError("Extension name must be str.")
        if name in self._extensions:
            self._extensions.pop(name)
        # Also drop every class registered under that extension name.
        for cls in list(self._extensions_by_cls.keys()):
            if self._extensions_by_cls[cls][0] == name:
                self._extensions_by_cls.pop(cls)

    def _encode(self, f, value, streams, ext_id):
        """Main encoder function."""
        # x() writes the one-char type id; an ext_id makes it uppercase
        # and appends the length-prefixed extension name.
        x = encode_type_id

        if value is None:
            f.write(x(b"v", ext_id))  # V for void
        elif value is True:
            f.write(x(b"y", ext_id))  # Y for yes
        elif value is False:
            f.write(x(b"n", ext_id))  # N for no
        elif isinstance(value, integer_types):
            if -32768 <= value <= 32767:
                f.write(x(b"h", ext_id) + spack("h", value))  # H for ...
            else:
                f.write(x(b"i", ext_id) + spack("<q", value))  # I for int
        elif isinstance(value, float):
            if self._float64:
                f.write(x(b"d", ext_id) + spack("<d", value))  # D for double
            else:
                f.write(x(b"f", ext_id) + spack("<f", value))  # f for float
        elif isinstance(value, unicode_types):
            bb = value.encode("UTF-8")
            f.write(x(b"s", ext_id) + lencode(len(bb)))  # S for str
            f.write(bb)
        elif isinstance(value, (list, tuple)):
            f.write(x(b"l", ext_id) + lencode(len(value)))  # L for list
            for v in value:
                self._encode(f, v, streams, None)
        elif isinstance(value, dict):
            f.write(x(b"m", ext_id) + lencode(len(value)))  # M for mapping
            for key, v in value.items():
                # Mapping keys must be identifier-like strings.
                if PY3:
                    assert key.isidentifier()  # faster
                else:  # pragma: no cover
                    assert _isidentifier(key)
                # yield ' ' * indent + key
                name_b = key.encode("UTF-8")
                f.write(lencode(len(name_b)))
                f.write(name_b)
                self._encode(f, v, streams, None)
        elif isinstance(value, bytes):
            f.write(x(b"b", ext_id))  # B for blob
            blob = Blob(
                value, compression=self._compression, use_checksum=self._use_checksum
            )
            blob._to_file(f)  # noqa
        elif isinstance(value, Blob):
            f.write(x(b"b", ext_id))  # B for blob
            value._to_file(f)  # noqa
        elif isinstance(value, BaseStream):
            # Initialize the stream
            if value.mode != "w":
                raise ValueError("Cannot serialize a read-mode stream.")
            elif isinstance(value, ListStream):
                # 255 marks an unclosed stream; the count (0) is patched
                # later by ListStream.close().
                f.write(x(b"l", ext_id) + spack("<BQ", 255, 0))  # L for list
            else:
                raise TypeError("Only ListStream is supported")
            # Mark this as *the* stream, and activate the stream.
            # The save() function verifies this is the last written object.
            if len(streams) > 0:
                raise ValueError("Can only have one stream per file.")
            streams.append(value)
            value._activate(f, self._encode, self._decode)  # noqa
        else:
            if ext_id is not None:
                raise ValueError(
                    "Extension %s wronfully encodes object to another "
                    "extension object (though it may encode to a list/dict "
                    "that contains other extension objects)." % ext_id
                )
            # Try if the value is of a type we know
            ex = self._extensions_by_cls.get(value.__class__, None)
            # Maybe its a subclass of a type we know
            if ex is None:
                for name, c in self._extensions.items():
                    if c.match(self, value):
                        ex = name, c.encode
                        break
                else:
                    ex = None
            # Success or fail
            if ex is not None:
                ext_id2, extension_encode = ex
                self._encode(f, extension_encode(self, value), streams, ext_id2)
            else:
                t = (
                    "Class %r is not a valid base BSDF type, nor is it "
                    "handled by an extension."
                )
                raise TypeError(t % value.__class__.__name__)

    def _decode(self, f):
        """Main decoder function."""

        # Get value
        char = f.read(1)
        c = char.lower()

        # Conversion (uppercase value identifiers signify converted values)
        if not char:
            raise EOFError()
        elif char != c:
            # Uppercase id: a length-prefixed extension name follows.
            n = strunpack("<B", f.read(1))[0]
            # if n == 253: n = strunpack('<Q', f.read(8))[0]  # noqa - noneed
            ext_id = f.read(n).decode("UTF-8")
        else:
            ext_id = None

        if c == b"v":
            value = None
        elif c == b"y":
            value = True
        elif c == b"n":
            value = False
        elif c == b"h":
            value = strunpack("<h", f.read(2))[0]
        elif c == b"i":
            value = strunpack("<q", f.read(8))[0]
        elif c == b"f":
            value = strunpack("<f", f.read(4))[0]
        elif c == b"d":
            value = strunpack("<d", f.read(8))[0]
        elif c == b"s":
            n_s = strunpack("<B", f.read(1))[0]
            if n_s == 253:
                n_s = strunpack("<Q", f.read(8))[0]  # noqa
            value = f.read(n_s).decode("UTF-8")
        elif c == b"l":
            n = strunpack("<B", f.read(1))[0]
            if n >= 254:
                # Streaming: 254 means closed (count known), 255 unclosed.
                closed = n == 254
                n = strunpack("<Q", f.read(8))[0]
                if self._load_streaming:
                    value = ListStream(n if closed else "r")
                    value._activate(f, self._encode, self._decode)  # noqa
                elif closed:
                    value = [self._decode(f) for i in range(n)]
                else:
                    # Unclosed stream: read items until the file ends.
                    value = []
                    try:
                        while True:
                            value.append(self._decode(f))
                    except EOFError:
                        pass
            else:
                # Normal
                if n == 253:
                    n = strunpack("<Q", f.read(8))[0]  # noqa
                value = [self._decode(f) for i in range(n)]
        elif c == b"m":
            value = dict()
            n = strunpack("<B", f.read(1))[0]
            if n == 253:
                n = strunpack("<Q", f.read(8))[0]  # noqa
            for i in range(n):
                n_name = strunpack("<B", f.read(1))[0]
                if n_name == 253:
                    n_name = strunpack("<Q", f.read(8))[0]  # noqa
                assert n_name > 0
                name = f.read(n_name).decode("UTF-8")
                value[name] = self._decode(f)
        elif c == b"b":
            if self._lazy_blob:
                value = Blob((f, True))
            else:
                blob = Blob((f, False))
                value = blob.get_bytes()
        else:
            raise RuntimeError("Parse error %r" % char)

        # Convert value if we have an extension for it
        if ext_id is not None:
            extension = self._extensions.get(ext_id, None)
            if extension is not None:
                value = extension.decode(self, value)
            else:
                logger.warning("BSDF warning: no extension found for %r" % ext_id)

        return value

    def encode(self, ob):
        """Save the given object to bytes."""
        f = BytesIO()
        self.save(f, ob)
        return f.getvalue()

    def save(self, f, ob):
        """Write the given object to the given file object."""
        # File header: magic string plus major/minor format version bytes.
        f.write(b"BSDF")
        f.write(struct.pack("<B", VERSION[0]))
        f.write(struct.pack("<B", VERSION[1]))

        # Prepare streaming, this list will have 0 or 1 item at the end
        streams = []

        self._encode(f, ob, streams, None)

        # Verify that stream object was at the end, and add initial elements
        if len(streams) > 0:
            stream = streams[0]
            if stream._start_pos != f.tell():
                raise ValueError(
                    "The stream object must be " "the last object to be encoded."
                )

    def decode(self, bb):
        """Load the data structure that is BSDF-encoded in the given bytes."""
        f = BytesIO(bb)
        return self.load(f)

    def load(self, f):
        """Load a BSDF-encoded object from the given file object."""
        # Check magic string
        f4 = f.read(4)
        if f4 != b"BSDF":
            raise RuntimeError("This does not look like a BSDF file: %r" % f4)
        # Check version
        major_version = strunpack("<B", f.read(1))[0]
        minor_version = strunpack("<B", f.read(1))[0]
        file_version = "%i.%i" % (major_version, minor_version)
        if major_version != VERSION[0]:  # major version should be 2
            t = (
                "Reading file with different major version (%s) "
                "from the implementation (%s)."
            )
            # NOTE(review): the %s arguments look swapped relative to the
            # message wording (file version should come first) -- confirm.
            raise RuntimeError(t % (__version__, file_version))
        if minor_version > VERSION[1]:  # minor should be < ours
            t = (
                "BSDF warning: reading file with higher minor version (%s) "
                "than the implementation (%s)."
            )
            # NOTE(review): same apparent argument swap as above -- confirm.
            logger.warning(t % (__version__, file_version))

        return self._decode(f)
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
# %% Streaming and blob-files
|
| 471 |
+
|
| 472 |
+
|
| 473 |
+
class BaseStream(object):
    """Base class for streams."""

    def __init__(self, mode="w"):
        self._i = 0  # index of the next element to read/write
        if isinstance(mode, int):
            # An integer mode means: read-mode with a known element count.
            self._count = mode
            mode = "r"
        else:
            # Write streams start empty; read streams with an unknown
            # count are marked with -1.
            self._count = 0 if mode == "w" else -1
        assert mode in ("r", "w")
        self._mode = mode
        self._f = None  # set once the stream is activated on a file
        self._start_pos = 0

    def _activate(self, file, encode_func, decode_func):
        # Bind this stream to a file; this may happen only once.
        if self._f is not None:  # Associated with another write
            raise IOError("Stream object cannot be activated twice?")
        self._f = file
        self._start_pos = self._f.tell()
        self._encode = encode_func
        self._decode = decode_func

    @property
    def mode(self):
        """The mode of this stream: 'r' or 'w'."""
        return self._mode
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
class ListStream(BaseStream):
    """A streamable list object used for writing or reading.
    In read mode, it can also be iterated over.
    """

    @property
    def count(self):
        """The number of elements in the stream (can be -1 for unclosed
        streams in read-mode).
        """
        return self._count

    @property
    def index(self):
        """The current index of the element to read/write."""
        return self._i

    def append(self, item):
        """Append an item to the streaming list. The object is immediately
        serialized and written to the underlying file.
        """
        # if self._mode != 'w':
        #     raise IOError('This ListStream is not in write mode.')
        if self._count != self._i:
            raise IOError("Can only append items to the end of the stream.")
        if self._f is None:
            raise IOError("List stream is not associated with a file yet.")
        if self._f.closed:
            raise IOError("Cannot stream to a close file.")
        # Pass ourselves as the active stream so a nested stream is rejected.
        self._encode(self._f, item, [self], None)
        self._i += 1
        self._count += 1

    def close(self, unstream=False):
        """Close the stream, marking the number of written elements. New
        elements may still be appended, but they won't be read during decoding.
        If ``unstream`` is True, the stream is turned into a regular list
        (not streaming).
        """
        # if self._mode != 'w':
        #     raise IOError('This ListStream is not in write mode.')
        if self._count != self._i:
            raise IOError("Can only close when at the end of the stream.")
        if self._f is None:
            raise IOError("ListStream is not associated with a file yet.")
        if self._f.closed:
            raise IOError("Cannot close a stream on a close file.")
        # Rewrite the 9-byte size header placed just before the stream's
        # start: marker 254 = closed stream, 253 = plain (non-streaming)
        # list; then restore the file position.
        i = self._f.tell()
        self._f.seek(self._start_pos - 8 - 1)
        self._f.write(spack("<B", 253 if unstream else 254))
        self._f.write(spack("<Q", self._count))
        self._f.seek(i)

    def next(self):
        """Read and return the next element in the streaming list.
        Raises StopIteration if the stream is exhausted.
        """
        if self._mode != "r":
            raise IOError("This ListStream in not in read mode.")
        if self._f is None:
            raise IOError("ListStream is not associated with a file yet.")
        if getattr(self._f, "closed", None):  # not present on 2.7 http req :/
            raise IOError("Cannot read a stream from a close file.")
        if self._count >= 0:
            # Known element count: stop once it is reached.
            if self._i >= self._count:
                raise StopIteration()
            self._i += 1
            return self._decode(self._f)
        else:
            # Unknown count (unclosed stream): read until the file ends.
            # This raises EOFError at some point.
            try:
                res = self._decode(self._f)
                self._i += 1
                return res
            except EOFError:
                self._count = self._i
                raise StopIteration()

    def __iter__(self):
        if self._mode != "r":
            raise IOError("Cannot iterate: ListStream in not in read mode.")
        return self

    def __next__(self):
        return self.next()
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
class Blob(object):
|
| 591 |
+
"""Object to represent a blob of bytes. When used to write a BSDF file,
|
| 592 |
+
it's a wrapper for bytes plus properties such as what compression to apply.
|
| 593 |
+
When used to read a BSDF file, it can be used to read the data lazily, and
|
| 594 |
+
also modify the data if reading in 'r+' mode and the blob isn't compressed.
|
| 595 |
+
"""
|
| 596 |
+
|
| 597 |
+
# For now, this does not allow re-sizing blobs (within the allocated size)
|
| 598 |
+
# but this can be added later.
|
| 599 |
+
|
| 600 |
+
def __init__(self, bb, compression=0, extra_size=0, use_checksum=False):
|
| 601 |
+
if isinstance(bb, bytes):
|
| 602 |
+
self._f = None
|
| 603 |
+
self.compressed = self._from_bytes(bb, compression)
|
| 604 |
+
self.compression = compression
|
| 605 |
+
self.allocated_size = self.used_size + extra_size
|
| 606 |
+
self.use_checksum = use_checksum
|
| 607 |
+
elif isinstance(bb, tuple) and len(bb) == 2 and hasattr(bb[0], "read"):
|
| 608 |
+
self._f, allow_seek = bb
|
| 609 |
+
self.compressed = None
|
| 610 |
+
self._from_file(self._f, allow_seek)
|
| 611 |
+
self._modified = False
|
| 612 |
+
else:
|
| 613 |
+
raise TypeError("Wrong argument to create Blob.")
|
| 614 |
+
|
| 615 |
+
def _from_bytes(self, value, compression):
|
| 616 |
+
"""When used to wrap bytes in a blob."""
|
| 617 |
+
if compression == 0:
|
| 618 |
+
compressed = value
|
| 619 |
+
elif compression == 1:
|
| 620 |
+
compressed = zlib.compress(value, 9)
|
| 621 |
+
elif compression == 2:
|
| 622 |
+
compressed = bz2.compress(value, 9)
|
| 623 |
+
else: # pragma: no cover
|
| 624 |
+
assert False, "Unknown compression identifier"
|
| 625 |
+
|
| 626 |
+
self.data_size = len(value)
|
| 627 |
+
self.used_size = len(compressed)
|
| 628 |
+
return compressed
|
| 629 |
+
|
| 630 |
+
def _to_file(self, f):
|
| 631 |
+
"""Private friend method called by encoder to write a blob to a file."""
|
| 632 |
+
# Write sizes - write at least in a size that allows resizing
|
| 633 |
+
if self.allocated_size <= 250 and self.compression == 0:
|
| 634 |
+
f.write(spack("<B", self.allocated_size))
|
| 635 |
+
f.write(spack("<B", self.used_size))
|
| 636 |
+
f.write(lencode(self.data_size))
|
| 637 |
+
else:
|
| 638 |
+
f.write(spack("<BQ", 253, self.allocated_size))
|
| 639 |
+
f.write(spack("<BQ", 253, self.used_size))
|
| 640 |
+
f.write(spack("<BQ", 253, self.data_size))
|
| 641 |
+
# Compression and checksum
|
| 642 |
+
f.write(spack("B", self.compression))
|
| 643 |
+
if self.use_checksum:
|
| 644 |
+
f.write(b"\xff" + hashlib.md5(self.compressed).digest())
|
| 645 |
+
else:
|
| 646 |
+
f.write(b"\x00")
|
| 647 |
+
# Byte alignment (only necessary for uncompressed data)
|
| 648 |
+
if self.compression == 0:
|
| 649 |
+
alignment = 8 - (f.tell() + 1) % 8 # +1 for the byte to write
|
| 650 |
+
f.write(spack("<B", alignment)) # padding for byte alignment
|
| 651 |
+
f.write(b"\x00" * alignment)
|
| 652 |
+
else:
|
| 653 |
+
f.write(spack("<B", 0))
|
| 654 |
+
# The actual data and extra space
|
| 655 |
+
f.write(self.compressed)
|
| 656 |
+
f.write(b"\x00" * (self.allocated_size - self.used_size))
|
| 657 |
+
|
| 658 |
+
def _from_file(self, f, allow_seek):
    """Used when a blob is read by the decoder.

    Parses the header written by ``_to_file``. When ``allow_seek`` is
    true the payload is not read into memory; only its file position is
    recorded so the blob can be accessed lazily later.
    """
    # Read blob header data (5 to 42 bytes)
    # Size
    allocated_size = strunpack("<B", f.read(1))[0]
    if allocated_size == 253:
        # 253 marks the 8-byte (uint64) size form.
        allocated_size = strunpack("<Q", f.read(8))[0]  # noqa
    used_size = strunpack("<B", f.read(1))[0]
    if used_size == 253:
        used_size = strunpack("<Q", f.read(8))[0]  # noqa
    data_size = strunpack("<B", f.read(1))[0]
    if data_size == 253:
        data_size = strunpack("<Q", f.read(8))[0]  # noqa
    # Compression and checksum
    compression = strunpack("<B", f.read(1))[0]
    has_checksum = strunpack("<B", f.read(1))[0]
    if has_checksum:
        checksum = f.read(16)
    # Skip alignment
    alignment = strunpack("<B", f.read(1))[0]
    f.read(alignment)
    # Get or skip data + extra space
    if allow_seek:
        # Lazy mode: remember where the payload lives and jump over it.
        self.start_pos = f.tell()
        self.end_pos = self.start_pos + used_size
        f.seek(self.start_pos + allocated_size)
    else:
        # Eager mode: load the payload and consume the spare space.
        self.start_pos = None
        self.end_pos = None
        self.compressed = f.read(used_size)
        f.read(allocated_size - used_size)
    # Store info
    self.alignment = alignment
    self.compression = compression
    # NOTE(review): this stores the raw 16-byte digest (or None) in
    # ``use_checksum``; other methods only test it for truthiness.
    self.use_checksum = checksum if has_checksum else None
    self.used_size = used_size
    self.allocated_size = allocated_size
    self.data_size = data_size
|
| 696 |
+
|
| 697 |
+
def seek(self, p):
    """Seek to the given position (relative to the blob start).

    Negative positions count back from the end of the allocated space.
    """
    if self._f is None:
        raise RuntimeError(
            "Cannot seek in a blob that is not created by the BSDF decoder."
        )
    pos = p if p >= 0 else self.allocated_size + p
    if not 0 <= pos <= self.allocated_size:
        raise IOError("Seek beyond blob boundaries.")
    self._f.seek(self.start_pos + pos)
|
| 708 |
+
|
| 709 |
+
def tell(self):
    """Get the current file pointer position (relative to the blob start)."""
    f = self._f
    if f is None:
        raise RuntimeError(
            "Cannot tell in a blob that is not created by the BSDF decoder."
        )
    return f.tell() - self.start_pos
|
| 716 |
+
|
| 717 |
+
def write(self, bb):
    """Write bytes to the blob.

    Only allowed for uncompressed blobs, and never past the blob's end.
    """
    f = self._f
    if f is None:
        raise RuntimeError(
            "Cannot write in a blob that is not created by the BSDF decoder."
        )
    if self.compression:
        raise IOError("Cannot arbitrarily write in compressed blob.")
    if f.tell() + len(bb) > self.end_pos:
        raise IOError("Write beyond blob boundaries.")
    # Mark dirty so update_checksum() knows to rewrite the digest.
    self._modified = True
    return f.write(bb)
|
| 729 |
+
|
| 730 |
+
def read(self, n):
    """Read n bytes from the blob.

    Only allowed for uncompressed blobs, and never past the blob's end.
    """
    f = self._f
    if f is None:
        raise RuntimeError(
            "Cannot read in a blob that is not created by the BSDF decoder."
        )
    if self.compression:
        raise IOError("Cannot arbitrarily read in compressed blob.")
    if f.tell() + n > self.end_pos:
        raise IOError("Read beyond blob boundaries.")
    return f.read(n)
|
| 741 |
+
|
| 742 |
+
def get_bytes(self):
    """Get the contents of the blob as bytes (decompressed if needed)."""
    raw = self.compressed
    if raw is None:
        # Payload lives in the file; read it without disturbing the pointer.
        saved = self._f.tell()
        self.seek(0)
        raw = self._f.read(self.used_size)
        self._f.seek(saved)
    decompressors = {0: lambda b: b, 1: zlib.decompress, 2: bz2.decompress}
    if self.compression not in decompressors:  # pragma: no cover
        raise RuntimeError("Invalid compression %i" % self.compression)
    return decompressors[self.compression](raw)
|
| 760 |
+
|
| 761 |
+
def update_checksum(self):
    """Reset the blob's checksum if present. Call this after modifying
    the data.
    """
    # or ... should the presence of a checksum mean that data is protected?
    if self.use_checksum and self._modified:
        # Re-read the payload and overwrite the 16-byte MD5 digest that
        # sits just before the alignment byte and its padding (see the
        # header layout written by _to_file).
        self.seek(0)
        compressed = self._f.read(self.used_size)
        self._f.seek(self.start_pos - self.alignment - 1 - 16)
        self._f.write(hashlib.md5(compressed).digest())
|
| 771 |
+
|
| 772 |
+
|
| 773 |
+
# %% High-level functions
|
| 774 |
+
|
| 775 |
+
|
| 776 |
+
def encode(ob, extensions=None, **options):
    """Save (BSDF-encode) the given object to bytes.
    See `BsdfSerializer` for details on extensions and options.
    """
    s = BsdfSerializer(extensions, **options)
    return s.encode(ob)
|
| 782 |
+
|
| 783 |
+
|
| 784 |
+
def save(f, ob, extensions=None, **options):
    """Save (BSDF-encode) the given object to the given filename or
    file object. See `BsdfSerializer` for details on extensions and options.
    """
    s = BsdfSerializer(extensions, **options)
    if isinstance(f, string_types):
        # Filename given: open, write, and close the file ourselves.
        with open(f, "wb") as fp:
            return s.save(fp, ob)
    else:
        return s.save(f, ob)
|
| 794 |
+
|
| 795 |
+
|
| 796 |
+
def decode(bb, extensions=None, **options):
    """Load a (BSDF-encoded) structure from bytes.
    See `BsdfSerializer` for details on extensions and options.
    """
    s = BsdfSerializer(extensions, **options)
    return s.decode(bb)
|
| 802 |
+
|
| 803 |
+
|
| 804 |
+
def load(f, extensions=None, **options):
    """Load a (BSDF-encoded) structure from the given filename or file object.
    See `BsdfSerializer` for details on extensions and options.
    """
    s = BsdfSerializer(extensions, **options)
    if isinstance(f, string_types):
        # Expand a leading "~" so home-relative paths work.
        if f.startswith(("~/", "~\\")):  # pragma: no cover
            f = os.path.expanduser(f)
        with open(f, "rb") as fp:
            return s.load(fp)
    else:
        return s.load(f)
|
| 816 |
+
|
| 817 |
+
|
| 818 |
+
# Aliases for json compat
|
| 819 |
+
loads = decode
|
| 820 |
+
dumps = encode
|
| 821 |
+
|
| 822 |
+
|
| 823 |
+
# %% Standard extensions
|
| 824 |
+
|
| 825 |
+
# Defining extensions as a dict would be more compact and feel lighter, but
|
| 826 |
+
# that would only allow lambdas, which is too limiting, e.g. for ndarray
|
| 827 |
+
# extension.
|
| 828 |
+
|
| 829 |
+
|
| 830 |
+
class Extension(object):
    """Base class to implement BSDF extensions for special data types.

    Extension classes are provided to the BSDF serializer, which
    instantiates the class. That way, the extension can be somewhat dynamic:
    e.g. the NDArrayExtension exposes the ndarray class only when numpy
    is imported.

    An extension instance must have two attributes. These can be attributes of
    the class, or of the instance set in ``__init__()``:

    * name (str): the name by which encoded values will be identified.
    * cls (type): the type (or list of types) to match values with.
      This is optional, but it makes the encoder select extensions faster.

    Further, it needs 3 methods:

    * `match(serializer, value) -> bool`: return whether the extension can
      convert the given value. The default is ``isinstance(value, self.cls)``.
    * `encode(serializer, value) -> encoded_value`: the function to encode a
      value to more basic data types.
    * `decode(serializer, encoded_value) -> value`: the function to decode an
      encoded value back to its intended representation.

    """

    name = ""
    cls = ()

    def __repr__(self):
        # Bugfix: hex(id(self)) already carries the "0x" prefix, so the
        # original "0x%s" % hex(...) printed a doubled "0x0x...". Use %x
        # with a single literal prefix instead.
        return "<BSDF extension %r at 0x%x>" % (self.name, id(self))

    def match(self, s, v):
        # Default match: isinstance against the declared cls (empty tuple
        # matches nothing, so subclasses must set cls or override match).
        return isinstance(v, self.cls)

    def encode(self, s, v):
        raise NotImplementedError()

    def decode(self, s, v):
        raise NotImplementedError()
|
| 870 |
+
|
| 871 |
+
|
| 872 |
+
class ComplexExtension(Extension):
    """BSDF extension that (de)serializes Python complex numbers as a
    (real, imag) pair.
    """

    name = "c"
    cls = complex

    def encode(self, s, v):
        # Represent the complex value as a 2-tuple of floats.
        return v.real, v.imag

    def decode(self, s, v):
        real, imag = v[0], v[1]
        return complex(real, imag)
|
| 881 |
+
|
| 882 |
+
|
| 883 |
+
class NDArrayExtension(Extension):
    """BSDF extension that (de)serializes numpy arrays as a dict of
    shape, dtype string, and raw bytes. If numpy is not importable at
    decode time, the raw dict is returned as-is.
    """

    name = "ndarray"

    def __init__(self):
        # Only expose the ndarray class if numpy was already imported by
        # the application; avoids importing numpy just to register this.
        if "numpy" in sys.modules:
            import numpy as np

            self.cls = np.ndarray

    def match(self, s, v):  # pragma: no cover - e.g. work for nd arrays in JS
        # Duck-typed match so array-likes from other libraries qualify too.
        return hasattr(v, "shape") and hasattr(v, "dtype") and hasattr(v, "tobytes")

    def encode(self, s, v):
        return dict(shape=v.shape, dtype=text_type(v.dtype), data=v.tobytes())

    def decode(self, s, v):
        try:
            import numpy as np
        except ImportError:  # pragma: no cover
            return v
        # Rebuild the array from the raw buffer, then restore its shape.
        a = np.frombuffer(v["data"], dtype=v["dtype"])
        a.shape = v["shape"]
        return a
|
| 906 |
+
|
| 907 |
+
|
| 908 |
+
standard_extensions = [ComplexExtension, NDArrayExtension]
|
| 909 |
+
|
| 910 |
+
|
| 911 |
+
if __name__ == "__main__":
|
| 912 |
+
# Invoke CLI
|
| 913 |
+
import bsdf_cli
|
| 914 |
+
|
| 915 |
+
bsdf_cli.main()
|
parrot/lib/python3.10/site-packages/imageio/plugins/_dicom.py
ADDED
|
@@ -0,0 +1,932 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# imageio is distributed under the terms of the (new) BSD License.
|
| 3 |
+
|
| 4 |
+
""" Plugin for reading DICOM files.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
# todo: Use pydicom:
|
| 8 |
+
# * Note: is not py3k ready yet
|
| 9 |
+
# * Allow reading the full meta info
|
| 10 |
+
# I think we can more or less replace the SimpleDicomReader with a
|
| 11 |
+
# pydicom.Dataset For series, only need to read the full info from one
|
| 12 |
+
# file: speed still high
|
| 13 |
+
# * Perhaps allow writing?
|
| 14 |
+
|
| 15 |
+
import sys
|
| 16 |
+
import os
|
| 17 |
+
import struct
|
| 18 |
+
import logging
|
| 19 |
+
|
| 20 |
+
import numpy as np
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
|
| 25 |
+
# Determine endianity of system
|
| 26 |
+
sys_is_little_endian = sys.byteorder == "little"
|
| 27 |
+
|
| 28 |
+
# Define a dictionary that contains the tags that we would like to know
|
| 29 |
+
MINIDICT = {
|
| 30 |
+
(0x7FE0, 0x0010): ("PixelData", "OB"),
|
| 31 |
+
# Date and time
|
| 32 |
+
(0x0008, 0x0020): ("StudyDate", "DA"),
|
| 33 |
+
(0x0008, 0x0021): ("SeriesDate", "DA"),
|
| 34 |
+
(0x0008, 0x0022): ("AcquisitionDate", "DA"),
|
| 35 |
+
(0x0008, 0x0023): ("ContentDate", "DA"),
|
| 36 |
+
(0x0008, 0x0030): ("StudyTime", "TM"),
|
| 37 |
+
(0x0008, 0x0031): ("SeriesTime", "TM"),
|
| 38 |
+
(0x0008, 0x0032): ("AcquisitionTime", "TM"),
|
| 39 |
+
(0x0008, 0x0033): ("ContentTime", "TM"),
|
| 40 |
+
# With what, where, by whom?
|
| 41 |
+
(0x0008, 0x0060): ("Modality", "CS"),
|
| 42 |
+
(0x0008, 0x0070): ("Manufacturer", "LO"),
|
| 43 |
+
(0x0008, 0x0080): ("InstitutionName", "LO"),
|
| 44 |
+
# Descriptions
|
| 45 |
+
(0x0008, 0x1030): ("StudyDescription", "LO"),
|
| 46 |
+
(0x0008, 0x103E): ("SeriesDescription", "LO"),
|
| 47 |
+
# UID's
|
| 48 |
+
(0x0008, 0x0016): ("SOPClassUID", "UI"),
|
| 49 |
+
(0x0008, 0x0018): ("SOPInstanceUID", "UI"),
|
| 50 |
+
(0x0020, 0x000D): ("StudyInstanceUID", "UI"),
|
| 51 |
+
(0x0020, 0x000E): ("SeriesInstanceUID", "UI"),
|
| 52 |
+
(0x0008, 0x0117): ("ContextUID", "UI"),
|
| 53 |
+
# Numbers
|
| 54 |
+
(0x0020, 0x0011): ("SeriesNumber", "IS"),
|
| 55 |
+
(0x0020, 0x0012): ("AcquisitionNumber", "IS"),
|
| 56 |
+
(0x0020, 0x0013): ("InstanceNumber", "IS"),
|
| 57 |
+
(0x0020, 0x0014): ("IsotopeNumber", "IS"),
|
| 58 |
+
(0x0020, 0x0015): ("PhaseNumber", "IS"),
|
| 59 |
+
(0x0020, 0x0016): ("IntervalNumber", "IS"),
|
| 60 |
+
(0x0020, 0x0017): ("TimeSlotNumber", "IS"),
|
| 61 |
+
(0x0020, 0x0018): ("AngleNumber", "IS"),
|
| 62 |
+
(0x0020, 0x0019): ("ItemNumber", "IS"),
|
| 63 |
+
(0x0020, 0x0020): ("PatientOrientation", "CS"),
|
| 64 |
+
(0x0020, 0x0030): ("ImagePosition", "CS"),
|
| 65 |
+
(0x0020, 0x0032): ("ImagePositionPatient", "CS"),
|
| 66 |
+
(0x0020, 0x0035): ("ImageOrientation", "CS"),
|
| 67 |
+
(0x0020, 0x0037): ("ImageOrientationPatient", "CS"),
|
| 68 |
+
# Patient information
|
| 69 |
+
(0x0010, 0x0010): ("PatientName", "PN"),
|
| 70 |
+
(0x0010, 0x0020): ("PatientID", "LO"),
|
| 71 |
+
(0x0010, 0x0030): ("PatientBirthDate", "DA"),
|
| 72 |
+
(0x0010, 0x0040): ("PatientSex", "CS"),
|
| 73 |
+
(0x0010, 0x1010): ("PatientAge", "AS"),
|
| 74 |
+
(0x0010, 0x1020): ("PatientSize", "DS"),
|
| 75 |
+
(0x0010, 0x1030): ("PatientWeight", "DS"),
|
| 76 |
+
# Image specific (required to construct numpy array)
|
| 77 |
+
(0x0028, 0x0002): ("SamplesPerPixel", "US"),
|
| 78 |
+
(0x0028, 0x0008): ("NumberOfFrames", "IS"),
|
| 79 |
+
(0x0028, 0x0100): ("BitsAllocated", "US"),
|
| 80 |
+
(0x0028, 0x0101): ("BitsStored", "US"),
|
| 81 |
+
(0x0028, 0x0102): ("HighBit", "US"),
|
| 82 |
+
(0x0028, 0x0103): ("PixelRepresentation", "US"),
|
| 83 |
+
(0x0028, 0x0010): ("Rows", "US"),
|
| 84 |
+
(0x0028, 0x0011): ("Columns", "US"),
|
| 85 |
+
(0x0028, 0x1052): ("RescaleIntercept", "DS"),
|
| 86 |
+
(0x0028, 0x1053): ("RescaleSlope", "DS"),
|
| 87 |
+
# Image specific (for the user)
|
| 88 |
+
(0x0028, 0x0030): ("PixelSpacing", "DS"),
|
| 89 |
+
(0x0018, 0x0088): ("SliceSpacing", "DS"),
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
# Define some special tags:
|
| 93 |
+
# See PS 3.5-2008 section 7.5 (p.40)
|
| 94 |
+
ItemTag = (0xFFFE, 0xE000) # start of Sequence Item
|
| 95 |
+
ItemDelimiterTag = (0xFFFE, 0xE00D) # end of Sequence Item
|
| 96 |
+
SequenceDelimiterTag = (0xFFFE, 0xE0DD) # end of Sequence of undefined length
|
| 97 |
+
|
| 98 |
+
# Define set of groups that we're interested in (so we can quickly skip others)
|
| 99 |
+
GROUPS = set([key[0] for key in MINIDICT.keys()])
|
| 100 |
+
VRS = set([val[1] for val in MINIDICT.values()])
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class NotADicomFile(Exception):
    """Raised when a file lacks the DICM magic, i.e. is not a DICOM file."""
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
class CompressedDicom(RuntimeError):
    """Raised for transfer syntaxes with compressed pixel data that this
    reader cannot decode.
    """
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
class SimpleDicomReader(object):
|
| 112 |
+
"""
|
| 113 |
+
This class provides reading of pixel data from DICOM files. It is
|
| 114 |
+
focussed on getting the pixel data, not the meta info.
|
| 115 |
+
|
| 116 |
+
To use, first create an instance of this class (giving it
|
| 117 |
+
a file object or filename). Next use the info attribute to
|
| 118 |
+
get a dict of the meta data. The loading of pixel data is
|
| 119 |
+
deferred until get_numpy_array() is called.
|
| 120 |
+
|
| 121 |
+
Comparison with Pydicom
|
| 122 |
+
-----------------------
|
| 123 |
+
|
| 124 |
+
This code focusses on getting the pixel data out, which allows some
|
| 125 |
+
shortcuts, resulting in the code being much smaller.
|
| 126 |
+
|
| 127 |
+
Since the processing of data elements is much cheaper (it skips a lot
|
| 128 |
+
of tags), this code is about 3x faster than pydicom (except for the
|
| 129 |
+
deflated DICOM files).
|
| 130 |
+
|
| 131 |
+
This class does borrow some code (and ideas) from the pydicom
|
| 132 |
+
project, and (to the best of our knowledge) has the same limitations
|
| 133 |
+
as pydicom with regard to the type of files that it can handle.
|
| 134 |
+
|
| 135 |
+
Limitations
|
| 136 |
+
-----------
|
| 137 |
+
|
| 138 |
+
For more advanced DICOM processing, please check out pydicom.
|
| 139 |
+
|
| 140 |
+
* Only a predefined subset of data elements (meta information) is read.
|
| 141 |
+
* This is a reader; it can not write DICOM files.
|
| 142 |
+
* (just like pydicom) it can handle none of the compressed DICOM
|
| 143 |
+
formats except for "Deflated Explicit VR Little Endian"
|
| 144 |
+
(1.2.840.10008.1.2.1.99).
|
| 145 |
+
|
| 146 |
+
"""
|
| 147 |
+
|
| 148 |
+
def __init__(self, file):
    """Create a reader for the given DICOM file (filename or file-like
    object) and immediately parse its header and data elements.
    """
    # Open file if filename given
    if isinstance(file, str):
        self._filename = file
        self._file = open(file, "rb")
    else:
        self._filename = "<unknown file>"
        self._file = file
    # Init variable to store position and size of pixel data
    self._pixel_data_loc = None
    # The meta header is always explicit and little endian
    self.is_implicit_VR = False
    self.is_little_endian = True
    self._unpackPrefix = "<"
    # Dict to store data elements of interest in
    self._info = {}
    # VR Conversion
    self._converters = {
        # Numbers
        "US": lambda x: self._unpack("H", x),
        "UL": lambda x: self._unpack("L", x),
        # Numbers encoded as strings
        "DS": lambda x: self._splitValues(x, float, "\\"),
        "IS": lambda x: self._splitValues(x, int, "\\"),
        # strings
        "AS": lambda x: x.decode("ascii", "ignore").strip("\x00"),
        "DA": lambda x: x.decode("ascii", "ignore").strip("\x00"),
        "TM": lambda x: x.decode("ascii", "ignore").strip("\x00"),
        "UI": lambda x: x.decode("ascii", "ignore").strip("\x00"),
        "LO": lambda x: x.decode("utf-8", "ignore").strip("\x00").rstrip(),
        # NOTE(review): CS values are parsed as floats here; non-numeric
        # code strings fall back to plain strings inside _splitValues.
        "CS": lambda x: self._splitValues(x, float, "\\"),
        "PN": lambda x: x.decode("utf-8", "ignore").strip("\x00").rstrip(),
    }

    # Initiate reading
    self._read()
|
| 184 |
+
|
| 185 |
+
@property
def info(self):
    # Dict of meta data elements read from the file (name -> value).
    return self._info
|
| 188 |
+
|
| 189 |
+
def _splitValues(self, x, type, splitter):
|
| 190 |
+
s = x.decode("ascii").strip("\x00")
|
| 191 |
+
try:
|
| 192 |
+
if splitter in s:
|
| 193 |
+
return tuple([type(v) for v in s.split(splitter) if v.strip()])
|
| 194 |
+
else:
|
| 195 |
+
return type(s)
|
| 196 |
+
except ValueError:
|
| 197 |
+
return s
|
| 198 |
+
|
| 199 |
+
def _unpack(self, fmt, value):
|
| 200 |
+
return struct.unpack(self._unpackPrefix + fmt, value)[0]
|
| 201 |
+
|
| 202 |
+
# Really only so we need minimal changes to _pixel_data_numpy
|
| 203 |
+
def __iter__(self):
|
| 204 |
+
return iter(self._info.keys())
|
| 205 |
+
|
| 206 |
+
def __getattr__(self, key):
    # Expose meta data elements as attributes (e.g. self.Rows). Fetch
    # _info via object.__getattribute__ to avoid recursing back into
    # this method while it is being looked up.
    info = object.__getattribute__(self, "_info")
    if key in info:
        return info[key]
    return object.__getattribute__(self, key)  # pragma: no cover
|
| 211 |
+
|
| 212 |
+
def _read(self):
    """Parse the file: header, data elements, and shape/sampling info.
    Pixel data itself is located but not loaded here.
    """
    f = self._file
    # Check prefix after preamble: a DICOM file starts with a 128-byte
    # preamble followed by the ASCII magic "DICM".
    f.seek(128)
    if f.read(4) != b"DICM":
        raise NotADicomFile("Not a valid DICOM file.")
    # Read
    self._read_header()
    self._read_data_elements()
    self._get_shape_and_sampling()
    # Close if done, reopen if necessary to read pixel data
    if os.path.isfile(self._filename):
        # We know the filename, so the file can be reopened later when
        # deferred pixel data is actually requested.
        self._file.close()
        self._file = None
|
| 226 |
+
|
| 227 |
+
def _readDataElement(self):
    """Read one data element and return (group, element, value).

    Pixel data (7FE0,0010) is not read into memory; its location is
    stored in ``self._pixel_data_loc`` and a placeholder is returned.
    """
    f = self._file
    # Get group and element
    group = self._unpack("H", f.read(2))
    element = self._unpack("H", f.read(2))
    # Get value length
    if self.is_implicit_VR:
        # Implicit VR: a 4-byte length directly follows the tag.
        vl = self._unpack("I", f.read(4))
    else:
        # Explicit VR: 2-byte VR code, then a 2-byte length — except for
        # these VRs, which use 2 reserved bytes plus a 4-byte length.
        vr = f.read(2)
        if vr in (b"OB", b"OW", b"SQ", b"UN"):
            reserved = f.read(2)  # noqa
            vl = self._unpack("I", f.read(4))
        else:
            vl = self._unpack("H", f.read(2))
    # Get value
    if group == 0x7FE0 and element == 0x0010:
        # Defer loading of the (potentially large) pixel data: record
        # where it starts and skip over it.
        here = f.tell()
        self._pixel_data_loc = here, vl
        f.seek(here + vl)
        return group, element, b"Deferred loading of pixel data"
    else:
        if vl == 0xFFFFFFFF:
            # Undefined length: scan forward for the sequence delimiter.
            value = self._read_undefined_length_value()
        else:
            value = f.read(vl)
        return group, element, value
|
| 254 |
+
|
| 255 |
+
def _read_undefined_length_value(self, read_size=128):
    """Copied (in compacted form) from PyDicom
    Copyright Darcy Mason.

    Scan forward in ``read_size`` chunks until the sequence delimiter
    tag is found; return all bytes up to (excluding) the delimiter.
    """
    fp = self._file
    # data_start = fp.tell()
    # Overlap successive chunks by this many bytes so a delimiter split
    # across a chunk boundary is still found.
    search_rewind = 3
    bytes_to_find = struct.pack(
        self._unpackPrefix + "HH", SequenceDelimiterTag[0], SequenceDelimiterTag[1]
    )

    found = False
    value_chunks = []
    while not found:
        chunk_start = fp.tell()
        bytes_read = fp.read(read_size)
        if len(bytes_read) < read_size:
            # try again,
            # if still don't get required amount, this is last block
            new_bytes = fp.read(read_size - len(bytes_read))
            bytes_read += new_bytes
            if len(bytes_read) < read_size:
                raise EOFError(
                    "End of file reached before sequence " "delimiter found."
                )
        index = bytes_read.find(bytes_to_find)
        if index != -1:
            found = True
            value_chunks.append(bytes_read[:index])
            fp.seek(chunk_start + index + 4)  # rewind to end of delimiter
            # The delimiter item should carry a zero length.
            length = fp.read(4)
            if length != b"\0\0\0\0":
                logger.warning(
                    "Expected 4 zero bytes after undefined length " "delimiter"
                )
        else:
            fp.seek(fp.tell() - search_rewind)  # rewind a bit
            # accumulate the bytes read (not including the rewind)
            value_chunks.append(bytes_read[:-search_rewind])

    # if get here then have found the byte string
    return b"".join(value_chunks)
|
| 297 |
+
|
| 298 |
+
def _read_header(self):
    """Read the group-0002 meta header and configure endianness and
    implicit/explicit VR from the TransferSyntaxUID.

    Raises CompressedDicom for compressed transfer syntaxes (except
    deflated explicit VR little endian, which is inflated in place).
    """
    f = self._file
    TransferSyntaxUID = None

    # Read all elements, store transferSyntax when we encounter it
    try:
        while True:
            fp_save = f.tell()
            # Get element
            group, element, value = self._readDataElement()
            if group == 0x02:
                # (0002,0010) is the TransferSyntaxUID element.
                if group == 0x02 and element == 0x10:
                    TransferSyntaxUID = value.decode("ascii").strip("\x00")
            else:
                # No more group 2: rewind and break
                # (don't trust group length)
                f.seek(fp_save)
                break
    except (EOFError, struct.error):  # pragma: no cover
        raise RuntimeError("End of file reached while still in header.")

    # Handle transfer syntax
    self._info["TransferSyntaxUID"] = TransferSyntaxUID
    #
    if TransferSyntaxUID is None:
        # Assume ExplicitVRLittleEndian
        is_implicit_VR, is_little_endian = False, True
    elif TransferSyntaxUID == "1.2.840.10008.1.2.1":
        # ExplicitVRLittleEndian
        is_implicit_VR, is_little_endian = False, True
    elif TransferSyntaxUID == "1.2.840.10008.1.2.2":
        # ExplicitVRBigEndian
        is_implicit_VR, is_little_endian = False, False
    elif TransferSyntaxUID == "1.2.840.10008.1.2":
        # implicit VR little endian
        is_implicit_VR, is_little_endian = True, True
    elif TransferSyntaxUID == "1.2.840.10008.1.2.1.99":
        # DeflatedExplicitVRLittleEndian:
        is_implicit_VR, is_little_endian = False, True
        self._inflate()
    else:
        # Unsupported (compressed) syntax; build a descriptive error.
        # http://www.dicomlibrary.com/dicom/transfer-syntax/
        t, extra_info = TransferSyntaxUID, ""
        if "1.2.840.10008.1.2.4.50" <= t < "1.2.840.10008.1.2.4.99":
            extra_info = " (JPEG)"
        if "1.2.840.10008.1.2.4.90" <= t < "1.2.840.10008.1.2.4.99":
            extra_info = " (JPEG 2000)"
        if t == "1.2.840.10008.1.2.5":
            extra_info = " (RLE)"
        if t == "1.2.840.10008.1.2.6.1":
            extra_info = " (RFC 2557)"
        raise CompressedDicom(
            "The dicom reader can only read files with "
            "uncompressed image data - not %r%s. You "
            "can try using dcmtk or gdcm to convert the "
            "image." % (t, extra_info)
        )

    # From hereon, use implicit/explicit big/little endian
    self.is_implicit_VR = is_implicit_VR
    self.is_little_endian = is_little_endian
    # Bool index: False selects ">", True selects "<".
    self._unpackPrefix = "><"[is_little_endian]
|
| 360 |
+
|
| 361 |
+
def _read_data_elements(self):
    """Read data elements until end of file, storing those listed in
    MINIDICT (converted per their VR) into ``self._info``.
    """
    info = self._info
    try:
        while True:
            # Get element
            group, element, value = self._readDataElement()
            # Is it a group we are interested in?
            if group in GROUPS:
                key = (group, element)
                name, vr = MINIDICT.get(key, (None, None))
                # Is it an element we are interested in?
                if name:
                    # Store value
                    converter = self._converters.get(vr, lambda x: x)
                    info[name] = converter(value)
    except (EOFError, struct.error):
        pass  # end of file ...
|
| 378 |
+
|
| 379 |
+
def get_numpy_array(self):
    """Get numpy array for this DICOM file, with the correct shape,
    and pixel values scaled appropriately.

    Loads the pixel data from disk on demand if it was deferred, then
    converts it to a numpy array (via ``_pixel_data_numpy``) and applies
    RescaleSlope/RescaleIntercept (via ``_apply_slope_and_offset``).
    Afterwards the raw bytes are replaced by a placeholder to free memory.

    Raises:
        TypeError: if the dataset contains no pixel data.
    """
    # Is there pixel data at all?
    if "PixelData" not in self:
        raise TypeError("No pixel data found in this dataset.")

    # Load it now if it was not already loaded.
    # len < 100 is a heuristic: a short value means the stored bytes are
    # only a placeholder, and the real data sits at _pixel_data_loc.
    if self._pixel_data_loc and len(self.PixelData) < 100:
        # Reopen the file if it was closed; remember to close it again
        close_file = False
        if self._file is None:
            close_file = True
            self._file = open(self._filename, "rb")
        # Read data from the recorded (offset, length) location
        self._file.seek(self._pixel_data_loc[0])
        if self._pixel_data_loc[1] == 0xFFFFFFFF:
            # Undefined length: data is delimited, read until terminator
            value = self._read_undefined_length_value()
        else:
            value = self._file.read(self._pixel_data_loc[1])
        # Close file (only if we opened it here)
        if close_file:
            self._file.close()
            self._file = None
        # Overwrite the placeholder with the real raw bytes
        self._info["PixelData"] = value

    # Convert to array and apply rescale slope/intercept
    data = self._pixel_data_numpy()
    data = self._apply_slope_and_offset(data)

    # Remove raw data again to preserve memory
    # Note that the data for the original file is loaded twice ...
    self._info["PixelData"] = (
        b"Data converted to numpy array, " + b"raw data removed to preserve memory"
    )
    return data
|
| 417 |
+
|
| 418 |
+
def _get_shape_and_sampling(self):
|
| 419 |
+
"""Get shape and sampling without actuall using the pixel data.
|
| 420 |
+
In this way, the user can get an idea what's inside without having
|
| 421 |
+
to load it.
|
| 422 |
+
"""
|
| 423 |
+
# Get shape (in the same way that pydicom does)
|
| 424 |
+
if "NumberOfFrames" in self and self.NumberOfFrames > 1:
|
| 425 |
+
if self.SamplesPerPixel > 1:
|
| 426 |
+
shape = (
|
| 427 |
+
self.SamplesPerPixel,
|
| 428 |
+
self.NumberOfFrames,
|
| 429 |
+
self.Rows,
|
| 430 |
+
self.Columns,
|
| 431 |
+
)
|
| 432 |
+
else:
|
| 433 |
+
shape = self.NumberOfFrames, self.Rows, self.Columns
|
| 434 |
+
elif "SamplesPerPixel" in self:
|
| 435 |
+
if self.SamplesPerPixel > 1:
|
| 436 |
+
if self.BitsAllocated == 8:
|
| 437 |
+
shape = self.SamplesPerPixel, self.Rows, self.Columns
|
| 438 |
+
else:
|
| 439 |
+
raise NotImplementedError(
|
| 440 |
+
"DICOM plugin only handles "
|
| 441 |
+
"SamplesPerPixel > 1 if Bits "
|
| 442 |
+
"Allocated = 8"
|
| 443 |
+
)
|
| 444 |
+
else:
|
| 445 |
+
shape = self.Rows, self.Columns
|
| 446 |
+
else:
|
| 447 |
+
raise RuntimeError(
|
| 448 |
+
"DICOM file has no SamplesPerPixel " "(perhaps this is a report?)"
|
| 449 |
+
)
|
| 450 |
+
|
| 451 |
+
# Try getting sampling between pixels
|
| 452 |
+
if "PixelSpacing" in self:
|
| 453 |
+
sampling = float(self.PixelSpacing[0]), float(self.PixelSpacing[1])
|
| 454 |
+
else:
|
| 455 |
+
sampling = 1.0, 1.0
|
| 456 |
+
if "SliceSpacing" in self:
|
| 457 |
+
sampling = (abs(self.SliceSpacing),) + sampling
|
| 458 |
+
|
| 459 |
+
# Ensure that sampling has as many elements as shape
|
| 460 |
+
sampling = (1.0,) * (len(shape) - len(sampling)) + sampling[-len(shape) :]
|
| 461 |
+
|
| 462 |
+
# Set shape and sampling
|
| 463 |
+
self._info["shape"] = shape
|
| 464 |
+
self._info["sampling"] = sampling
|
| 465 |
+
|
| 466 |
+
def _pixel_data_numpy(self):
|
| 467 |
+
"""Return a NumPy array of the pixel data."""
|
| 468 |
+
# Taken from pydicom
|
| 469 |
+
# Copyright (c) 2008-2012 Darcy Mason
|
| 470 |
+
|
| 471 |
+
if "PixelData" not in self:
|
| 472 |
+
raise TypeError("No pixel data found in this dataset.")
|
| 473 |
+
|
| 474 |
+
# determine the type used for the array
|
| 475 |
+
need_byteswap = self.is_little_endian != sys_is_little_endian
|
| 476 |
+
|
| 477 |
+
# Make NumPy format code, e.g. "uint16", "int32" etc
|
| 478 |
+
# from two pieces of info:
|
| 479 |
+
# self.PixelRepresentation -- 0 for unsigned, 1 for signed;
|
| 480 |
+
# self.BitsAllocated -- 8, 16, or 32
|
| 481 |
+
format_str = "%sint%d" % (
|
| 482 |
+
("u", "")[self.PixelRepresentation],
|
| 483 |
+
self.BitsAllocated,
|
| 484 |
+
)
|
| 485 |
+
try:
|
| 486 |
+
numpy_format = np.dtype(format_str)
|
| 487 |
+
except TypeError: # pragma: no cover
|
| 488 |
+
raise TypeError(
|
| 489 |
+
"Data type not understood by NumPy: format='%s', "
|
| 490 |
+
" PixelRepresentation=%d, BitsAllocated=%d"
|
| 491 |
+
% (numpy_format, self.PixelRepresentation, self.BitsAllocated)
|
| 492 |
+
)
|
| 493 |
+
|
| 494 |
+
# Have correct Numpy format, so create the NumPy array
|
| 495 |
+
arr = np.frombuffer(self.PixelData, numpy_format).copy()
|
| 496 |
+
|
| 497 |
+
# XXX byte swap - may later handle this in read_file!!?
|
| 498 |
+
if need_byteswap:
|
| 499 |
+
arr.byteswap(True) # True means swap in-place, don't make new copy
|
| 500 |
+
|
| 501 |
+
# Note the following reshape operations return a new *view* onto arr,
|
| 502 |
+
# but don't copy the data
|
| 503 |
+
arr = arr.reshape(*self._info["shape"])
|
| 504 |
+
return arr
|
| 505 |
+
|
| 506 |
+
def _apply_slope_and_offset(self, data):
|
| 507 |
+
"""
|
| 508 |
+
If RescaleSlope and RescaleIntercept are present in the data,
|
| 509 |
+
apply them. The data type of the data is changed if necessary.
|
| 510 |
+
"""
|
| 511 |
+
# Obtain slope and offset
|
| 512 |
+
slope, offset = 1, 0
|
| 513 |
+
needFloats, needApplySlopeOffset = False, False
|
| 514 |
+
if "RescaleSlope" in self:
|
| 515 |
+
needApplySlopeOffset = True
|
| 516 |
+
slope = self.RescaleSlope
|
| 517 |
+
if "RescaleIntercept" in self:
|
| 518 |
+
needApplySlopeOffset = True
|
| 519 |
+
offset = self.RescaleIntercept
|
| 520 |
+
if int(slope) != slope or int(offset) != offset:
|
| 521 |
+
needFloats = True
|
| 522 |
+
if not needFloats:
|
| 523 |
+
slope, offset = int(slope), int(offset)
|
| 524 |
+
|
| 525 |
+
# Apply slope and offset
|
| 526 |
+
if needApplySlopeOffset:
|
| 527 |
+
# Maybe we need to change the datatype?
|
| 528 |
+
if data.dtype in [np.float32, np.float64]:
|
| 529 |
+
pass
|
| 530 |
+
elif needFloats:
|
| 531 |
+
data = data.astype(np.float32)
|
| 532 |
+
else:
|
| 533 |
+
# Determine required range
|
| 534 |
+
minReq, maxReq = data.min().item(), data.max().item()
|
| 535 |
+
minReq = min([minReq, minReq * slope + offset, maxReq * slope + offset])
|
| 536 |
+
maxReq = max([maxReq, minReq * slope + offset, maxReq * slope + offset])
|
| 537 |
+
|
| 538 |
+
# Determine required datatype from that
|
| 539 |
+
dtype = None
|
| 540 |
+
if minReq < 0:
|
| 541 |
+
# Signed integer type
|
| 542 |
+
maxReq = max([-minReq, maxReq])
|
| 543 |
+
if maxReq < 2**7:
|
| 544 |
+
dtype = np.int8
|
| 545 |
+
elif maxReq < 2**15:
|
| 546 |
+
dtype = np.int16
|
| 547 |
+
elif maxReq < 2**31:
|
| 548 |
+
dtype = np.int32
|
| 549 |
+
else:
|
| 550 |
+
dtype = np.float32
|
| 551 |
+
else:
|
| 552 |
+
# Unsigned integer type
|
| 553 |
+
if maxReq < 2**8:
|
| 554 |
+
dtype = np.int8
|
| 555 |
+
elif maxReq < 2**16:
|
| 556 |
+
dtype = np.int16
|
| 557 |
+
elif maxReq < 2**32:
|
| 558 |
+
dtype = np.int32
|
| 559 |
+
else:
|
| 560 |
+
dtype = np.float32
|
| 561 |
+
# Change datatype
|
| 562 |
+
if dtype != data.dtype:
|
| 563 |
+
data = data.astype(dtype)
|
| 564 |
+
|
| 565 |
+
# Apply slope and offset
|
| 566 |
+
data *= slope
|
| 567 |
+
data += offset
|
| 568 |
+
|
| 569 |
+
# Done
|
| 570 |
+
return data
|
| 571 |
+
|
| 572 |
+
def _inflate(self):
|
| 573 |
+
# Taken from pydicom
|
| 574 |
+
# Copyright (c) 2008-2012 Darcy Mason
|
| 575 |
+
import zlib
|
| 576 |
+
from io import BytesIO
|
| 577 |
+
|
| 578 |
+
# See PS3.6-2008 A.5 (p 71) -- when written, the entire dataset
|
| 579 |
+
# following the file metadata was prepared the normal way,
|
| 580 |
+
# then "deflate" compression applied.
|
| 581 |
+
# All that is needed here is to decompress and then
|
| 582 |
+
# use as normal in a file-like object
|
| 583 |
+
zipped = self._file.read()
|
| 584 |
+
# -MAX_WBITS part is from comp.lang.python answer:
|
| 585 |
+
# groups.google.com/group/comp.lang.python/msg/e95b3b38a71e6799
|
| 586 |
+
unzipped = zlib.decompress(zipped, -zlib.MAX_WBITS)
|
| 587 |
+
self._file = BytesIO(unzipped) # a file-like object
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
class DicomSeries(object):
    """DicomSeries

    This class represents a serie of dicom files (SimpleDicomReader
    objects) that belong together. If these are multiple files, they
    represent the slices of a volume (like for CT or MRI).

    Entries are registered with ``_append``; once all files are collected,
    ``_finish`` validates them and computes the aggregate shape/sampling.
    """

    def __init__(self, suid, progressIndicator):
        # List of SimpleDicomReader objects that make up this series
        self._entries = []

        # SeriesInstanceUID shared by all entries
        self._suid = suid
        # Aggregated header info; filled by _finish()
        self._info = {}
        # Callback object used to report progress and warnings
        self._progressIndicator = progressIndicator

    def __len__(self):
        return len(self._entries)

    def __iter__(self):
        return iter(self._entries)

    def __getitem__(self, index):
        return self._entries[index]

    @property
    def suid(self):
        # The SeriesInstanceUID shared by all files in this series
        return self._suid

    @property
    def shape(self):
        """The shape of the data (nz, ny, nx)."""
        return self._info["shape"]

    @property
    def sampling(self):
        """The sampling (voxel distances) of the data (dz, dy, dx)."""
        return self._info["sampling"]

    @property
    def info(self):
        """A dictionary containing the information as present in the
        first dicomfile of this serie. Empty until _finish() has run."""
        return self._info

    @property
    def description(self):
        """A description of the dicom series. Used fields are
        PatientName, shape of the data, SeriesDescription, and
        ImageComments.
        """
        info = self.info

        # If no info available, return simple description
        if not info:  # pragma: no cover
            return "DicomSeries containing %i images" % len(self)

        fields = []
        # Give patient name
        if "PatientName" in info:
            fields.append("" + info["PatientName"])
        # Also add dimensions
        if self.shape:
            tmp = [str(d) for d in self.shape]
            fields.append("x".join(tmp))
        # Try adding more fields
        if "SeriesDescription" in info:
            fields.append("'" + info["SeriesDescription"] + "'")
        if "ImageComments" in info:
            fields.append("'" + info["ImageComments"] + "'")

        # Combine
        return " ".join(fields)

    def __repr__(self):
        adr = hex(id(self)).upper()
        return "<DicomSeries with %i images at %s>" % (len(self), adr)

    def get_numpy_array(self):
        """Get (load) the data that this DicomSeries represents, and return
        it as a numpy array. If this serie contains multiple images, the
        resulting array is 3D, otherwise it's 2D.

        Raises:
            ValueError: if the series contains no files.
            RuntimeError: if _finish() has not produced series info yet.
        """

        # It's easy if no file or if just a single file
        if len(self) == 0:
            raise ValueError("Serie does not contain any files.")
        elif len(self) == 1:
            return self[0].get_numpy_array()

        # Check info
        if self.info is None:
            raise RuntimeError("Cannot return volume if series not finished.")

        # Init data (using what the dicom packaged produces as a reference)
        # NOTE(review): 'slice' shadows the builtin of the same name here.
        slice = self[0].get_numpy_array()
        vol = np.zeros(self.shape, dtype=slice.dtype)
        vol[0] = slice

        # Fill volume one slice at a time, reporting progress
        self._progressIndicator.start("loading data", "", len(self))
        for z in range(1, len(self)):
            vol[z] = self[z].get_numpy_array()
            self._progressIndicator.set_progress(z + 1)
        self._progressIndicator.finish()

        # Encourage freeing of the per-slice arrays loaded along the way
        import gc

        gc.collect()
        return vol

    def _append(self, dcm):
        # Register one SimpleDicomReader as part of this series
        self._entries.append(dcm)

    def _sort(self):
        # Sort slices by InstanceNumber, then by z-position.
        # NOTE(review): entries lacking ImagePositionPatient get key None,
        # which cannot be ordered against floats on Python 3 — this assumes
        # all entries of one series are consistent in this regard; verify.
        self._entries.sort(
            key=lambda k: (
                k.InstanceNumber,
                (
                    k.ImagePositionPatient[2]
                    if hasattr(k, "ImagePositionPatient")
                    else None
                ),
            )
        )

    def _finish(self):
        """
        Evaluate the series of dicom files. Together they should make up
        a volumetric dataset. This means the files should meet certain
        conditions. Also some additional information has to be calculated,
        such as the distance between the slices. This method sets the
        attributes for "shape", "sampling" and "info".

        This method checks:
          * that there are no missing files
          * that the dimensions of all images match
          * that the pixel spacing of all images match
        """

        # The datasets list should be sorted by instance number
        L = self._entries
        if len(L) == 0:
            return
        elif len(L) == 1:
            # Single file: its info is the series info
            self._info = L[0].info
            return

        # Get previous
        ds1 = L[0]
        # Init measures to calculate average of
        distance_sum = 0.0
        # Init measures to check (these are in 2D)
        dimensions = ds1.Rows, ds1.Columns
        sampling = ds1.info["sampling"][:2]  # row, column

        for index in range(len(L)):
            # The first round ds1 and ds2 are the same entry; that yields a
            # zero distance contribution, so the average is unaffected.
            ds2 = L[index]
            # Get z-positions of the previous and current slice
            pos1 = float(ds1.ImagePositionPatient[2])
            pos2 = float(ds2.ImagePositionPatient[2])
            # Update distance_sum to calculate distance later
            distance_sum += abs(pos1 - pos2)
            # Test measures
            dimensions2 = ds2.Rows, ds2.Columns
            sampling2 = ds2.info["sampling"][:2]  # row, column
            if dimensions != dimensions2:
                # We cannot produce a volume if the dimensions do not match
                raise ValueError("Dimensions of slices does not match.")
            if sampling != sampling2:
                # We can still produce a volume, but we should notify the user
                self._progressIndicator.write("Warn: sampling does not match.")
            # Store previous
            ds1 = ds2

        # Finish calculating average distance
        # (Note that there are len(L)-1 distances)
        distance_mean = distance_sum / (len(L) - 1)

        # Set info dict: start from the first file's info
        self._info = L[0].info.copy()

        # Store information that is specific for the serie
        self._info["shape"] = (len(L),) + ds2.info["shape"]
        self._info["sampling"] = (distance_mean,) + ds2.info["sampling"]
|
| 781 |
+
|
| 782 |
+
|
| 783 |
+
def list_files(files, path):
    """Recursively collect all file paths under *path* into *files*.

    Directories are descended into; regular files are appended to the
    *files* list (which is mutated in place).
    """
    for name in os.listdir(path):
        full = os.path.join(path, name)
        if os.path.isdir(full):
            # Recurse into subdirectory
            list_files(files, full)
        elif os.path.isfile(full):
            files.append(full)
|
| 791 |
+
|
| 792 |
+
|
| 793 |
+
def process_directory(request, progressIndicator, readPixelData=False):
    """
    Reads dicom files and returns a list of DicomSeries objects, which
    contain information about the data, and can be used to load the
    image or volume data.

    if readPixelData is True, the pixel data of all series is read. By
    default the loading of pixeldata is deferred until it is requested
    using the DicomSeries.get_pixel_array() method. In general, both
    methods should be equally fast.
    """
    # Resolve the directory to examine
    if os.path.isdir(request.filename):
        path = request.filename
    elif os.path.isfile(request.filename):
        path = os.path.dirname(request.filename)
    else:  # pragma: no cover - tested earlier
        raise ValueError("Dicom plugin needs a valid filename to examine the directory")

    # Collect candidate files recursively
    all_files = []
    list_files(all_files, path)

    # Examine each file and group readers by SeriesInstanceUID
    suid2series = {}
    progressIndicator.start("examining files", "files", len(all_files))
    for nr, fname in enumerate(all_files, 1):
        progressIndicator.set_progress(nr)
        # Skip DICOMDIR files
        if "DICOMDIR" in fname:  # pragma: no cover
            continue
        # Try loading dicom ...
        try:
            dcm = SimpleDicomReader(fname)
        except NotADicomFile:
            continue  # skip non-dicom file
        except Exception as why:  # pragma: no cover
            progressIndicator.write(str(why))
            continue
        # Register the file with an existing or new series object
        try:
            suid = dcm.SeriesInstanceUID
        except AttributeError:  # pragma: no cover
            continue  # some other kind of dicom file
        if suid not in suid2series:
            suid2series[suid] = DicomSeries(suid, progressIndicator)
        suid2series[suid]._append(dcm)

    # Deterministic order: sort the series by their uid
    series = sorted(suid2series.values(), key=lambda s: s.suid)

    # Split series if necessary (iterate a snapshot; the list is mutated)
    for serie in reversed(list(series)):
        splitSerieIfRequired(serie, series, progressIndicator)

    # Finish all series, dropping those that fail
    # (probably report-like files without pixels)
    valid_series = []
    for serie in series:
        try:
            serie._finish()
            valid_series.append(serie)
        except Exception as err:  # pragma: no cover
            progressIndicator.write(str(err))
    progressIndicator.finish("Found %i correct series." % len(valid_series))

    # Done
    return valid_series
|
| 870 |
+
|
| 871 |
+
|
| 872 |
+
def splitSerieIfRequired(serie, series, progressIndicator):
    """
    Split the serie in multiple series if this is required. The choice
    is based on examining the image position relative to the previous
    image. If it differs too much, it is assumed that there is a new
    dataset. This can happen for example in unsplit gated CT data.

    ``series`` (the overall list) is mutated in place: the original serie
    is replaced, at its position, by the split parts (in order).
    """

    # Sort the original list and get local name
    serie._sort()
    L = serie._entries
    # Init previous slice
    ds1 = L[0]
    # Check whether we can do this (need z-positions to compare)
    if "ImagePositionPatient" not in ds1:
        return
    # Initialize a list of new lists (each sub-list becomes one serie)
    L2 = [[ds1]]
    # Init slice distance estimate (0 means "not yet established")
    distance = 0

    for index in range(1, len(L)):
        # Get current slice
        ds2 = L[index]
        # Get z-positions of the previous and the current slice
        pos1 = float(ds1.ImagePositionPatient[2])
        pos2 = float(ds2.ImagePositionPatient[2])
        # Get distance between the two
        newDist = abs(pos1 - pos2)
        # deltaDist = abs(firstPos-pos2)
        # If the distance deviates more than 2x from what we've seen,
        # we can agree it's a new dataset. Reset distance so it is
        # re-established within the new sub-list.
        if distance and newDist > 2.1 * distance:
            L2.append([])
            distance = 0
        else:
            # Same dataset; a 1.5x jump suggests one missing file
            if distance and newDist > 1.5 * distance:
                progressIndicator.write(
                    "Warning: missing file after %r" % ds1._filename
                )
            distance = newDist
        # Add to last list
        L2[-1].append(ds2)
        # Store previous
        ds1 = ds2

    # Split if we should
    if len(L2) > 1:
        # At what position are we now?
        i = series.index(serie)
        # Create one new serie per sub-list
        series2insert = []
        for L in L2:
            newSerie = DicomSeries(serie.suid, progressIndicator)
            newSerie._entries = L
            series2insert.append(newSerie)
        # Insert new series (in order, at the old position) and remove self
        for newSerie in reversed(series2insert):
            series.insert(i, newSerie)
        series.remove(serie)
|
parrot/lib/python3.10/site-packages/imageio/plugins/_freeimage.py
ADDED
|
@@ -0,0 +1,1312 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# imageio is distributed under the terms of the (new) BSD License.
|
| 3 |
+
|
| 4 |
+
# styletest: ignore E261
|
| 5 |
+
|
| 6 |
+
""" Module imageio/freeimage.py
|
| 7 |
+
|
| 8 |
+
This module contains the wrapper code for the freeimage library.
|
| 9 |
+
The functions defined in this module are relatively thin; just thin
|
| 10 |
+
enough so that arguments and results are native Python/numpy data
|
| 11 |
+
types.
|
| 12 |
+
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
import sys
|
| 17 |
+
import ctypes
|
| 18 |
+
import threading
|
| 19 |
+
import logging
|
| 20 |
+
import numpy
|
| 21 |
+
|
| 22 |
+
from ..core import (
|
| 23 |
+
get_remote_file,
|
| 24 |
+
load_lib,
|
| 25 |
+
Dict,
|
| 26 |
+
resource_dirs,
|
| 27 |
+
IS_PYPY,
|
| 28 |
+
get_platform,
|
| 29 |
+
InternetNotAllowedError,
|
| 30 |
+
NeedDownloadError,
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
logger = logging.getLogger(__name__)
|
| 34 |
+
|
| 35 |
+
TEST_NUMPY_NO_STRIDES = False # To test pypy fallback
|
| 36 |
+
|
| 37 |
+
FNAME_PER_PLATFORM = {
|
| 38 |
+
"osx32": "libfreeimage-3.16.0-osx10.6.dylib", # universal library
|
| 39 |
+
"osx64": "libfreeimage-3.16.0-osx10.6.dylib",
|
| 40 |
+
"win32": "FreeImage-3.18.0-win32.dll",
|
| 41 |
+
"win64": "FreeImage-3.18.0-win64.dll",
|
| 42 |
+
"linux32": "libfreeimage-3.16.0-linux32.so",
|
| 43 |
+
"linux64": "libfreeimage-3.16.0-linux64.so",
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def download(directory=None, force_download=False):
    """Download the FreeImage library to your computer.

    Parameters
    ----------
    directory : str | None
        The directory where the file will be cached if a download was
        required to obtain the file. By default, the appdata directory
        is used. This is also the first directory that is checked for
        a local version of the file.
    force_download : bool | str
        If True, the file will be downloaded even if a local copy exists
        (and this copy will be overwritten). Can also be a YYYY-MM-DD date
        to ensure a file is up-to-date (modified date of a file on disk,
        if present, is checked).
    """
    plat = get_platform()
    # Only platforms we ship a binary for
    if plat and plat in FNAME_PER_PLATFORM:
        get_remote_file(
            fname="freeimage/" + FNAME_PER_PLATFORM[plat],
            directory=directory,
            force_download=force_download,
        )
        fi._lib = None  # allow trying again (needed to make tests work)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def get_freeimage_lib():
    """Ensure we have our version of the binary freeimage lib."""

    # An explicit override via the environment always wins.
    lib = os.getenv("IMAGEIO_FREEIMAGE_LIB", None)
    if lib:  # pragma: no cover
        return lib

    # If we do not provide a binary for this platform, the system may
    # still provide one; nothing to fetch here in that case.
    plat = get_platform()
    if not (plat and plat in FNAME_PER_PLATFORM):
        return None

    try:
        return get_remote_file("freeimage/" + FNAME_PER_PLATFORM[plat], auto=False)
    except InternetNotAllowedError:
        pass
    except NeedDownloadError:
        raise NeedDownloadError(
            "Need FreeImage library. "
            "You can obtain it with either:\n"
            " - download using the command: "
            "imageio_download_bin freeimage\n"
            " - download by calling (in Python): "
            "imageio.plugins.freeimage.download()\n"
        )
    except RuntimeError as err:  # pragma: no cover
        logger.warning(str(err))
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
# Define function to encode a filename to bytes (for the current system)
|
| 99 |
+
def efn(x):
    """Return filename *x* encoded to bytes with the system's filesystem encoding."""
    encoding = sys.getfilesystemencoding()
    return x.encode(encoding)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
# 4-byte quads of 0,v,v,v from 0,0,0,0 to 0,255,255,255: a 256-entry
# greyscale palette where each entry repeats v in the low three bytes.
GREY_PALETTE = numpy.arange(256, dtype=numpy.uint32) * numpy.uint32(0x00010101)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
class FI_TYPES(object):
    """Constants of the FreeImage FREE_IMAGE_TYPE enum, plus lookup
    tables translating between FreeImage types and numpy dtypes.
    """

    FIT_UNKNOWN = 0
    FIT_BITMAP = 1
    FIT_UINT16 = 2
    FIT_INT16 = 3
    FIT_UINT32 = 4
    FIT_INT32 = 5
    FIT_FLOAT = 6
    FIT_DOUBLE = 7
    FIT_COMPLEX = 8
    FIT_RGB16 = 9
    FIT_RGBA16 = 10
    FIT_RGBF = 11
    FIT_RGBAF = 12

    # FreeImage type -> numpy dtype of a single channel
    dtypes = {
        FIT_BITMAP: numpy.uint8,
        FIT_UINT16: numpy.uint16,
        FIT_INT16: numpy.int16,
        FIT_UINT32: numpy.uint32,
        FIT_INT32: numpy.int32,
        FIT_FLOAT: numpy.float32,
        FIT_DOUBLE: numpy.float64,
        FIT_COMPLEX: numpy.complex128,
        FIT_RGB16: numpy.uint16,
        FIT_RGBA16: numpy.uint16,
        FIT_RGBF: numpy.float32,
        FIT_RGBAF: numpy.float32,
    }

    # (numpy scalar type, channel count) -> FreeImage type
    fi_types = {
        (numpy.uint8, 1): FIT_BITMAP,
        (numpy.uint8, 3): FIT_BITMAP,
        (numpy.uint8, 4): FIT_BITMAP,
        (numpy.uint16, 1): FIT_UINT16,
        (numpy.int16, 1): FIT_INT16,
        (numpy.uint32, 1): FIT_UINT32,
        (numpy.int32, 1): FIT_INT32,
        (numpy.float32, 1): FIT_FLOAT,
        (numpy.float64, 1): FIT_DOUBLE,
        (numpy.complex128, 1): FIT_COMPLEX,
        (numpy.uint16, 3): FIT_RGB16,
        (numpy.uint16, 4): FIT_RGBA16,
        (numpy.float32, 3): FIT_RGBF,
        (numpy.float32, 4): FIT_RGBAF,
    }

    # FreeImage type -> extra (trailing) array dimensions beyond (rows, cols)
    extra_dims = {
        FIT_UINT16: [],
        FIT_INT16: [],
        FIT_UINT32: [],
        FIT_INT32: [],
        FIT_FLOAT: [],
        FIT_DOUBLE: [],
        FIT_COMPLEX: [],
        FIT_RGB16: [3],
        FIT_RGBA16: [4],
        FIT_RGBF: [3],
        FIT_RGBAF: [4],
    }
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
class IO_FLAGS(object):
    """Per-plugin load/save flags, passed to FreeImage_Load/FreeImage_Save."""

    FIF_LOAD_NOPIXELS = 0x8000  # loading: load the image header only
    #                           # (not supported by all plugins)
    BMP_DEFAULT = 0
    BMP_SAVE_RLE = 1
    CUT_DEFAULT = 0
    DDS_DEFAULT = 0
    EXR_DEFAULT = 0  # save data as half with piz-based wavelet compression
    EXR_FLOAT = 0x0001  # save data as float instead of half (not recommended)
    EXR_NONE = 0x0002  # save with no compression
    EXR_ZIP = 0x0004  # save with zlib compression, in blocks of 16 scan lines
    EXR_PIZ = 0x0008  # save with piz-based wavelet compression
    EXR_PXR24 = 0x0010  # save with lossy 24-bit float compression
    EXR_B44 = 0x0020  # save with lossy 44% float compression
    #                 # - goes to 22% when combined with EXR_LC
    EXR_LC = 0x0040  # save images with one luminance and two chroma channels,
    #                # rather than as RGB (lossy compression)
    FAXG3_DEFAULT = 0
    GIF_DEFAULT = 0
    GIF_LOAD256 = 1  # Load the image as a 256 color image with unused
    #                # palette entries, if it's 16 or 2 color
    GIF_PLAYBACK = 2  # 'Play' the GIF to generate each frame (as 32bpp)
    #                 # instead of returning raw frame data when loading
    HDR_DEFAULT = 0
    ICO_DEFAULT = 0
    ICO_MAKEALPHA = 1  # convert to 32bpp and create an alpha channel from the
    #                  # AND-mask when loading
    IFF_DEFAULT = 0
    J2K_DEFAULT = 0  # save with a 16:1 rate
    JP2_DEFAULT = 0  # save with a 16:1 rate
    JPEG_DEFAULT = 0  # loading (see JPEG_FAST);
    #                 # saving (see JPEG_QUALITYGOOD|JPEG_SUBSAMPLING_420)
    JPEG_FAST = 0x0001  # load the file as fast as possible,
    #                   # sacrificing some quality
    JPEG_ACCURATE = 0x0002  # load the file with the best quality,
    #                       # sacrificing some speed
    JPEG_CMYK = 0x0004  # load separated CMYK "as is"
    #                   # (use | to combine with other load flags)
    JPEG_EXIFROTATE = 0x0008  # load and rotate according to
    #                         # Exif 'Orientation' tag if available
    JPEG_QUALITYSUPERB = 0x80  # save with superb quality (100:1)
    JPEG_QUALITYGOOD = 0x0100  # save with good quality (75:1)
    JPEG_QUALITYNORMAL = 0x0200  # save with normal quality (50:1)
    JPEG_QUALITYAVERAGE = 0x0400  # save with average quality (25:1)
    JPEG_QUALITYBAD = 0x0800  # save with bad quality (10:1)
    JPEG_PROGRESSIVE = 0x2000  # save as a progressive-JPEG
    #                          # (use | to combine with other save flags)
    JPEG_SUBSAMPLING_411 = 0x1000  # save with high 4x1 chroma
    #                              # subsampling (4:1:1)
    JPEG_SUBSAMPLING_420 = 0x4000  # save with medium 2x2 medium chroma
    #                              # subsampling (4:2:0) - default value
    JPEG_SUBSAMPLING_422 = 0x8000  # save /w low 2x1 chroma subsampling (4:2:2)
    JPEG_SUBSAMPLING_444 = 0x10000  # save with no chroma subsampling (4:4:4)
    JPEG_OPTIMIZE = 0x20000  # on saving, compute optimal Huffman coding tables
    #                        # (can reduce a few percent of file size)
    JPEG_BASELINE = 0x40000  # save basic JPEG, without metadata or any markers
    KOALA_DEFAULT = 0
    LBM_DEFAULT = 0
    MNG_DEFAULT = 0
    PCD_DEFAULT = 0
    PCD_BASE = 1  # load the bitmap sized 768 x 512
    PCD_BASEDIV4 = 2  # load the bitmap sized 384 x 256
    PCD_BASEDIV16 = 3  # load the bitmap sized 192 x 128
    PCX_DEFAULT = 0
    PFM_DEFAULT = 0
    PICT_DEFAULT = 0
    PNG_DEFAULT = 0
    PNG_IGNOREGAMMA = 1  # loading: avoid gamma correction
    PNG_Z_BEST_SPEED = 0x0001  # save using ZLib level 1 compression flag
    #                          # (default value is 6)
    PNG_Z_DEFAULT_COMPRESSION = 0x0006  # save using ZLib level 6 compression
    #                                   # flag (default recommended value)
    PNG_Z_BEST_COMPRESSION = 0x0009  # save using ZLib level 9 compression flag
    #                                # (default value is 6)
    PNG_Z_NO_COMPRESSION = 0x0100  # save without ZLib compression
    PNG_INTERLACED = 0x0200  # save using Adam7 interlacing (use | to combine
    #                        # with other save flags)
    PNM_DEFAULT = 0
    PNM_SAVE_RAW = 0  # Writer saves in RAW format (i.e. P4, P5 or P6)
    PNM_SAVE_ASCII = 1  # Writer saves in ASCII format (i.e. P1, P2 or P3)
    PSD_DEFAULT = 0
    PSD_CMYK = 1  # reads tags for separated CMYK (default is conversion to RGB)
    PSD_LAB = 2  # reads tags for CIELab (default is conversion to RGB)
    RAS_DEFAULT = 0
    RAW_DEFAULT = 0  # load the file as linear RGB 48-bit
    RAW_PREVIEW = 1  # try to load the embedded JPEG preview with included
    #                # Exif Data or default to RGB 24-bit
    RAW_DISPLAY = 2  # load the file as RGB 24-bit
    SGI_DEFAULT = 0
    TARGA_DEFAULT = 0
    TARGA_LOAD_RGB888 = 1  # Convert RGB555 and ARGB8888 -> RGB888.
    TARGA_SAVE_RLE = 2  # Save with RLE compression
    TIFF_DEFAULT = 0
    TIFF_CMYK = 0x0001  # reads/stores tags for separated CMYK
    #                   # (use | to combine with compression flags)
    TIFF_PACKBITS = 0x0100  # save using PACKBITS compression
    TIFF_DEFLATE = 0x0200  # save using DEFLATE (a.k.a. ZLIB) compression
    TIFF_ADOBE_DEFLATE = 0x0400  # save using ADOBE DEFLATE compression
    TIFF_NONE = 0x0800  # save without any compression
    TIFF_CCITTFAX3 = 0x1000  # save using CCITT Group 3 fax encoding
    TIFF_CCITTFAX4 = 0x2000  # save using CCITT Group 4 fax encoding
    TIFF_LZW = 0x4000  # save using LZW compression
    TIFF_JPEG = 0x8000  # save using JPEG compression
    TIFF_LOGLUV = 0x10000  # save using LogLuv compression
    WBMP_DEFAULT = 0
    XBM_DEFAULT = 0
    XPM_DEFAULT = 0
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
class METADATA_MODELS(object):
    """Constants of the FreeImage FREE_IMAGE_MDMODEL enum (metadata models)."""

    FIMD_COMMENTS = 0
    FIMD_EXIF_MAIN = 1
    FIMD_EXIF_EXIF = 2
    FIMD_EXIF_GPS = 3
    FIMD_EXIF_MAKERNOTE = 4
    FIMD_EXIF_INTEROP = 5
    FIMD_IPTC = 6
    FIMD_XMP = 7
    FIMD_GEOTIFF = 8
    FIMD_ANIMATION = 9
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
class METADATA_DATATYPE(object):
    """Constants of the FreeImage FREE_IMAGE_MDTYPE enum (metadata tag
    datatypes), plus a mapping to the numpy dtypes used to decode tag values.
    """

    FIDT_BYTE = 1  # 8-bit unsigned integer
    FIDT_ASCII = 2  # 8-bit bytes w/ last byte null
    FIDT_SHORT = 3  # 16-bit unsigned integer
    FIDT_LONG = 4  # 32-bit unsigned integer
    FIDT_RATIONAL = 5  # 64-bit unsigned fraction
    FIDT_SBYTE = 6  # 8-bit signed integer
    FIDT_UNDEFINED = 7  # 8-bit untyped data
    FIDT_SSHORT = 8  # 16-bit signed integer
    FIDT_SLONG = 9  # 32-bit signed integer
    FIDT_SRATIONAL = 10  # 64-bit signed fraction
    FIDT_FLOAT = 11  # 32-bit IEEE floating point
    FIDT_DOUBLE = 12  # 64-bit IEEE floating point
    FIDT_IFD = 13  # 32-bit unsigned integer (offset)
    FIDT_PALETTE = 14  # 32-bit RGBQUAD
    FIDT_LONG8 = 16  # 64-bit unsigned integer
    FIDT_SLONG8 = 17  # 64-bit signed integer
    FIDT_IFD8 = 18  # 64-bit unsigned integer (offset)

    # Tag datatype -> numpy dtype (structured dtypes for fractions/palettes)
    dtypes = {
        FIDT_BYTE: numpy.uint8,
        FIDT_SHORT: numpy.uint16,
        FIDT_LONG: numpy.uint32,
        FIDT_RATIONAL: [("numerator", numpy.uint32), ("denominator", numpy.uint32)],
        FIDT_LONG8: numpy.uint64,
        FIDT_SLONG8: numpy.int64,
        FIDT_IFD8: numpy.uint64,
        FIDT_SBYTE: numpy.int8,
        FIDT_UNDEFINED: numpy.uint8,
        FIDT_SSHORT: numpy.int16,
        FIDT_SLONG: numpy.int32,
        FIDT_SRATIONAL: [("numerator", numpy.int32), ("denominator", numpy.int32)],
        FIDT_FLOAT: numpy.float32,
        FIDT_DOUBLE: numpy.float64,
        FIDT_IFD: numpy.uint32,
        FIDT_PALETTE: [
            ("R", numpy.uint8),
            ("G", numpy.uint8),
            ("B", numpy.uint8),
            ("A", numpy.uint8),
        ],
    }
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
class Freeimage(object):
    """Class to represent an interface to the FreeImage library.
    This class is relatively thin. It provides a Pythonic API that converts
    Freeimage objects to Python objects, but that's about it.
    The actual implementation should be provided by the plugins.

    The recommended way to call into the Freeimage library (so that
    errors and warnings show up in the right moment) is to use this
    object as a context manager:
    with imageio.fi as lib:
        lib.FreeImage_GetPalette()

    """

    _API = {
        # All we're doing here is telling ctypes that some of the
        # FreeImage functions return pointers instead of integers. (On
        # 64-bit systems, without this information the pointers get
        # truncated and crashes result). There's no need to list
        # functions that return ints, or the types of the parameters
        # to these or other functions -- that's fine to do implicitly.
        # Note that the ctypes immediately converts the returned void_p
        # back to a python int again! This is really not helpful,
        # because then passing it back to another library call will
        # cause truncation-to-32-bits on 64-bit systems. Thanks, ctypes!
        # So after these calls one must immediately re-wrap the int as
        # a c_void_p if it is to be passed back into FreeImage.
        "FreeImage_AllocateT": (ctypes.c_void_p, None),
        "FreeImage_FindFirstMetadata": (ctypes.c_void_p, None),
        "FreeImage_GetBits": (ctypes.c_void_p, None),
        "FreeImage_GetPalette": (ctypes.c_void_p, None),
        "FreeImage_GetTagKey": (ctypes.c_char_p, None),
        "FreeImage_GetTagValue": (ctypes.c_void_p, None),
        "FreeImage_CreateTag": (ctypes.c_void_p, None),
        "FreeImage_Save": (ctypes.c_void_p, None),
        "FreeImage_Load": (ctypes.c_void_p, None),
        "FreeImage_LoadFromMemory": (ctypes.c_void_p, None),
        "FreeImage_OpenMultiBitmap": (ctypes.c_void_p, None),
        "FreeImage_LoadMultiBitmapFromMemory": (ctypes.c_void_p, None),
        "FreeImage_LockPage": (ctypes.c_void_p, None),
        "FreeImage_OpenMemory": (ctypes.c_void_p, None),
        # 'FreeImage_ReadMemory': (ctypes.c_void_p, None),
        # 'FreeImage_CloseMemory': (ctypes.c_void_p, None),
        "FreeImage_GetVersion": (ctypes.c_char_p, None),
        "FreeImage_GetFIFExtensionList": (ctypes.c_char_p, None),
        "FreeImage_GetFormatFromFIF": (ctypes.c_char_p, None),
        "FreeImage_GetFIFDescription": (ctypes.c_char_p, None),
        "FreeImage_ColorQuantizeEx": (ctypes.c_void_p, None),
        # Pypy wants some extra definitions, so here we go ...
        "FreeImage_IsLittleEndian": (ctypes.c_int, None),
        "FreeImage_SetOutputMessage": (ctypes.c_void_p, None),
        "FreeImage_GetFIFCount": (ctypes.c_int, None),
        "FreeImage_IsPluginEnabled": (ctypes.c_int, None),
        "FreeImage_GetFileType": (ctypes.c_int, None),
        #
        "FreeImage_GetTagType": (ctypes.c_int, None),
        "FreeImage_GetTagLength": (ctypes.c_int, None),
        "FreeImage_FindNextMetadata": (ctypes.c_int, None),
        "FreeImage_FindCloseMetadata": (ctypes.c_void_p, None),
        #
        "FreeImage_GetFIFFromFilename": (ctypes.c_int, None),
        "FreeImage_FIFSupportsReading": (ctypes.c_int, None),
        "FreeImage_FIFSupportsWriting": (ctypes.c_int, None),
        "FreeImage_FIFSupportsExportType": (ctypes.c_int, None),
        "FreeImage_FIFSupportsExportBPP": (ctypes.c_int, None),
        "FreeImage_GetHeight": (ctypes.c_int, None),
        "FreeImage_GetWidth": (ctypes.c_int, None),
        "FreeImage_GetImageType": (ctypes.c_int, None),
        "FreeImage_GetBPP": (ctypes.c_int, None),
        "FreeImage_GetColorsUsed": (ctypes.c_int, None),
        "FreeImage_ConvertTo32Bits": (ctypes.c_void_p, None),
        "FreeImage_GetPitch": (ctypes.c_int, None),
        "FreeImage_Unload": (ctypes.c_void_p, None),
    }

    def __init__(self):
        # Initialize freeimage lib as None
        self._lib = None

        # A lock to create thread-safety
        self._lock = threading.RLock()

        # Init log messages lists
        self._messages = []

        # Select functype for error handler (FreeImage uses stdcall on
        # Windows, cdecl elsewhere)
        if sys.platform.startswith("win"):
            functype = ctypes.WINFUNCTYPE
        else:
            functype = ctypes.CFUNCTYPE

        # Create output message handler; keeps at most 256 messages
        @functype(None, ctypes.c_int, ctypes.c_char_p)
        def error_handler(fif, message):
            message = message.decode("utf-8")
            self._messages.append(message)
            while (len(self._messages)) > 256:
                self._messages.pop(0)

        # Make sure to keep a ref to function (ctypes callbacks must not
        # be garbage collected while registered)
        self._error_handler = error_handler

    @property
    def lib(self):
        """The loaded ctypes library object; loads it on first access.
        If loading failed, _lib holds the error string and this raises.
        """
        if self._lib is None:
            try:
                self.load_freeimage()
            except OSError as err:
                self._lib = "The freeimage library could not be loaded: "
                self._lib += str(err)
        if isinstance(self._lib, str):
            raise RuntimeError(self._lib)
        return self._lib

    def has_lib(self):
        """Return True if the freeimage library could be loaded."""
        try:
            self.lib
        except Exception:
            return False
        return True

    def load_freeimage(self):
        """Try to load the freeimage lib from the system. If not successful,
        try to download the imageio version and try again.
        """
        # Load library and register API
        success = False
        try:
            # Try without forcing a download, but giving preference
            # to the imageio-provided lib (if previously downloaded)
            self._load_freeimage()
            self._register_api()
            if self.lib.FreeImage_GetVersion().decode("utf-8") >= "3.15":
                success = True
        except OSError:
            pass

        if not success:
            # Ensure we have our own lib, try again
            get_freeimage_lib()
            self._load_freeimage()
            self._register_api()

        # Wrap up: register the message callback and cache the version
        self.lib.FreeImage_SetOutputMessage(self._error_handler)
        self.lib_version = self.lib.FreeImage_GetVersion().decode("utf-8")

    def _load_freeimage(self):
        # Define names
        lib_names = ["freeimage", "libfreeimage"]
        exact_lib_names = [
            "FreeImage",
            "libfreeimage.dylib",
            "libfreeimage.so",
            "libfreeimage.so.3",
        ]
        # Add names of libraries that we provide (that file may not exist)
        res_dirs = resource_dirs()
        plat = get_platform()
        if plat:  # Can be None on e.g. FreeBSD
            fname = FNAME_PER_PLATFORM[plat]
            for dir in res_dirs:
                exact_lib_names.insert(0, os.path.join(dir, "freeimage", fname))

        # Add the path specified with IMAGEIO_FREEIMAGE_LIB:
        lib = os.getenv("IMAGEIO_FREEIMAGE_LIB", None)
        if lib is not None:
            exact_lib_names.insert(0, lib)

        # Load
        try:
            lib, fname = load_lib(exact_lib_names, lib_names, res_dirs)
        except OSError as err:  # pragma: no cover
            err_msg = str(err) + "\nPlease install the FreeImage library."
            raise OSError(err_msg)

        # Store
        self._lib = lib
        self.lib_fname = fname

    def _register_api(self):
        # Albert's ctypes pattern: declare restype/argtypes for the
        # functions listed in _API so pointers are not truncated
        for f, (restype, argtypes) in self._API.items():
            func = getattr(self.lib, f)
            func.restype = restype
            func.argtypes = argtypes

    # Handling of output messages

    def __enter__(self):
        # Acquire the lock so library calls + message handling are atomic
        self._lock.acquire()
        return self.lib

    def __exit__(self, *args):
        self._show_any_warnings()
        self._lock.release()

    def _reset_log(self):
        """Reset the list of output messages. Call this before
        loading or saving an image with the FreeImage API.
        """
        self._messages = []

    def _get_error_message(self):
        """Get the output messages produced since the last reset as
        one string. Returns 'No known reason.' if there are no messages.
        Also resets the log.
        """
        if self._messages:
            res = " ".join(self._messages)
            self._reset_log()
            return res
        else:
            return "No known reason."

    def _show_any_warnings(self):
        """If there were any messages since the last reset, show them
        as a warning. Otherwise do nothing. Also resets the messages.
        """
        if self._messages:
            logger.warning("imageio.freeimage warning: " + self._get_error_message())
            self._reset_log()

    def get_output_log(self):
        """Return a list of the last 256 output messages
        (warnings and errors) produced by the FreeImage library.
        """
        # This message log is not cleared/reset, but kept to 256 elements.
        return [m for m in self._messages]

    def getFIF(self, filename, mode, bb=None):
        """Get the freeimage Format (FIF) from a given filename.
        If mode is 'r', will try to determine the format by reading
        the file, otherwise only the filename is used.

        This function also tests whether the format supports reading/writing.
        """
        with self as lib:
            # Init
            ftype = -1
            if mode not in "rw":
                raise ValueError('Invalid mode (must be "r" or "w").')

            # Try getting format from the content. Note that some files
            # do not have a header that allows reading the format from
            # the file.
            if mode == "r":
                if bb is not None:
                    fimemory = lib.FreeImage_OpenMemory(ctypes.c_char_p(bb), len(bb))
                    ftype = lib.FreeImage_GetFileTypeFromMemory(
                        ctypes.c_void_p(fimemory), len(bb)
                    )
                    lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory))
                if (ftype == -1) and os.path.isfile(filename):
                    ftype = lib.FreeImage_GetFileType(efn(filename), 0)
            # Try getting the format from the extension
            if ftype == -1:
                ftype = lib.FreeImage_GetFIFFromFilename(efn(filename))

            # Test if ok
            if ftype == -1:
                raise ValueError('Cannot determine format of file "%s"' % filename)
            elif mode == "w" and not lib.FreeImage_FIFSupportsWriting(ftype):
                raise ValueError('Cannot write the format of file "%s"' % filename)
            elif mode == "r" and not lib.FreeImage_FIFSupportsReading(ftype):
                raise ValueError('Cannot read the format of file "%s"' % filename)
            return ftype

    def create_bitmap(self, filename, ftype, flags=0):
        """create_bitmap(filename, ftype, flags=0)
        Create a wrapped bitmap object.
        """
        return FIBitmap(self, filename, ftype, flags)

    def create_multipage_bitmap(self, filename, ftype, flags=0):
        """create_multipage_bitmap(filename, ftype, flags=0)
        Create a wrapped multipage bitmap object.
        """
        return FIMultipageBitmap(self, filename, ftype, flags)
|
| 614 |
+
|
| 615 |
+
|
| 616 |
+
class FIBaseBitmap(object):
    """Base class wrapping a FreeImage bitmap handle, managing its
    lifetime and its metadata.
    """

    def __init__(self, fi, filename, ftype, flags):
        # fi: the Freeimage interface object
        # ftype: FreeImage format constant; flags: IO_FLAGS value
        self._fi = fi
        self._filename = filename
        self._ftype = ftype
        self._flags = flags
        self._bitmap = None
        # List of (func, *args) tuples to call on close (e.g. Unload)
        self._close_funcs = []

    def __del__(self):
        self.close()

    def close(self):
        """Release the underlying FreeImage bitmap (best-effort)."""
        if (self._bitmap is not None) and self._close_funcs:
            for close_func in self._close_funcs:
                try:
                    with self._fi:
                        fun = close_func[0]
                        fun(*close_func[1:])
                except Exception:  # pragma: no cover
                    pass
        self._close_funcs = []
        self._bitmap = None

    def _set_bitmap(self, bitmap, close_func=None):
        """Function to set the bitmap and specify the function to unload it."""
        if self._bitmap is not None:
            pass  # bitmap is converted
        if close_func is None:
            close_func = self._fi.lib.FreeImage_Unload, bitmap

        self._bitmap = bitmap
        if close_func:
            self._close_funcs.append(close_func)

    def get_meta_data(self):
        """Return the bitmap's metadata as a Dict of per-model sub-dicts."""
        # todo: there is also FreeImage_TagToString, is that useful?
        # and would that work well when reading and then saving?

        # Create a list of (model_name, number) tuples
        models = [
            (name[5:], number)
            for name, number in METADATA_MODELS.__dict__.items()
            if name.startswith("FIMD_")
        ]

        # Prepare
        metadata = Dict()
        tag = ctypes.c_void_p()

        with self._fi as lib:
            # Iterate over all FreeImage meta models
            for model_name, number in models:
                # Find beginning, get search handle
                mdhandle = lib.FreeImage_FindFirstMetadata(
                    number, self._bitmap, ctypes.byref(tag)
                )
                mdhandle = ctypes.c_void_p(mdhandle)
                if mdhandle:
                    # Iterate over all tags in this model
                    more = True
                    while more:
                        # Get info about tag
                        tag_name = lib.FreeImage_GetTagKey(tag).decode("utf-8")
                        tag_type = lib.FreeImage_GetTagType(tag)
                        byte_size = lib.FreeImage_GetTagLength(tag)
                        char_ptr = ctypes.c_char * byte_size
                        data = char_ptr.from_address(lib.FreeImage_GetTagValue(tag))
                        # Convert in a way compatible with Pypy
                        tag_bytes = bytes(bytearray(data))
                        # The default value is the raw bytes
                        tag_val = tag_bytes
                        # Convert to a Python value in the metadata dict
                        if tag_type == METADATA_DATATYPE.FIDT_ASCII:
                            tag_val = tag_bytes.decode("utf-8", "replace")
                        elif tag_type in METADATA_DATATYPE.dtypes:
                            dtype = METADATA_DATATYPE.dtypes[tag_type]
                            if IS_PYPY and isinstance(dtype, (list, tuple)):
                                pass  # pragma: no cover - or we get a segfault
                            else:
                                try:
                                    tag_val = numpy.frombuffer(
                                        tag_bytes, dtype=dtype
                                    ).copy()
                                    if len(tag_val) == 1:
                                        tag_val = tag_val[0]
                                except Exception:  # pragma: no cover
                                    pass
                        # Store data in dict
                        subdict = metadata.setdefault(model_name, Dict())
                        subdict[tag_name] = tag_val
                        # Next
                        more = lib.FreeImage_FindNextMetadata(
                            mdhandle, ctypes.byref(tag)
                        )

                    # Close search handle for current meta model
                    lib.FreeImage_FindCloseMetadata(mdhandle)

            # Done
            return metadata

    def set_meta_data(self, metadata):
        """Write the given metadata dict (model name -> {tag: value})
        to the bitmap. Unknown models are silently ignored.
        """
        # Create a dict mapping model_name to number
        models = {}
        for name, number in METADATA_MODELS.__dict__.items():
            if name.startswith("FIMD_"):
                models[name[5:]] = number

        # Create a mapping from numpy.dtype to METADATA_DATATYPE
        def get_tag_type_number(dtype):
            for number, numpy_dtype in METADATA_DATATYPE.dtypes.items():
                if dtype == numpy_dtype:
                    return number
            else:
                return None

        with self._fi as lib:
            for model_name, subdict in metadata.items():
                # Get model number
                number = models.get(model_name, None)
                if number is None:
                    continue  # Unknown model, silent ignore

                for tag_name, tag_val in subdict.items():
                    # Create new tag
                    tag = lib.FreeImage_CreateTag()
                    tag = ctypes.c_void_p(tag)

                    try:
                        # Convert Python value to FI type, val
                        is_ascii = False
                        if isinstance(tag_val, str):
                            try:
                                tag_bytes = tag_val.encode("ascii")
                                is_ascii = True
                            except UnicodeError:
                                pass
                        if is_ascii:
                            tag_type = METADATA_DATATYPE.FIDT_ASCII
                            tag_count = len(tag_bytes)
                        else:
                            if not hasattr(tag_val, "dtype"):
                                tag_val = numpy.array([tag_val])
                            tag_type = get_tag_type_number(tag_val.dtype)
                            if tag_type is None:
                                logger.warning(
                                    "imageio.freeimage warning: Could not "
                                    "determine tag type of %r." % tag_name
                                )
                                continue
                            tag_bytes = tag_val.tobytes()
                            tag_count = tag_val.size
                        # Set properties
                        lib.FreeImage_SetTagKey(tag, tag_name.encode("utf-8"))
                        lib.FreeImage_SetTagType(tag, tag_type)
                        lib.FreeImage_SetTagCount(tag, tag_count)
                        lib.FreeImage_SetTagLength(tag, len(tag_bytes))
                        lib.FreeImage_SetTagValue(tag, tag_bytes)
                        # Store tag
                        tag_key = lib.FreeImage_GetTagKey(tag)
                        lib.FreeImage_SetMetadata(number, self._bitmap, tag_key, tag)

                    except Exception as err:  # pragma: no cover
                        logger.warning(
                            "imagio.freeimage warning: Could not set tag "
                            "%r: %s, %s"
                            % (tag_name, self._fi._get_error_message(), str(err))
                        )
                    finally:
                        lib.FreeImage_DeleteTag(tag)
|
| 787 |
+
|
| 788 |
+
|
| 789 |
+
class FIBitmap(FIBaseBitmap):
|
| 790 |
+
"""Wrapper for the FI bitmap object."""
|
| 791 |
+
|
| 792 |
+
def allocate(self, array):
    """Allocate an empty FreeImage bitmap matching *array*'s shape and dtype."""
    # Prepare array
    assert isinstance(array, numpy.ndarray)
    shape = array.shape
    dtype = array.dtype

    # Get shape and channel info
    r, c = shape[:2]
    if len(shape) == 2:
        n_channels = 1
    elif len(shape) == 3:
        n_channels = shape[2]
    else:
        n_channels = shape[0]

    # Get fi_type: look up the FreeImage type for this (dtype, channels) combo
    try:
        fi_type = FI_TYPES.fi_types[(dtype.type, n_channels)]
        self._fi_type = fi_type
    except KeyError:
        raise ValueError("Cannot write arrays of given type and shape.")

    # Allocate bitmap
    with self._fi as lib:
        # bits per pixel = bits per channel * channel count
        bpp = 8 * dtype.itemsize * n_channels
        bitmap = lib.FreeImage_AllocateT(fi_type, c, r, bpp, 0, 0, 0)
        bitmap = ctypes.c_void_p(bitmap)

        # Check and store (NULL pointer means allocation failed)
        if not bitmap:  # pragma: no cover
            raise RuntimeError(
                "Could not allocate bitmap for storage: %s"
                % self._fi._get_error_message()
            )
        self._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap))
|
| 827 |
+
|
| 828 |
+
def load_from_filename(self, filename=None):
|
| 829 |
+
if filename is None:
|
| 830 |
+
filename = self._filename
|
| 831 |
+
|
| 832 |
+
with self._fi as lib:
|
| 833 |
+
# Create bitmap
|
| 834 |
+
bitmap = lib.FreeImage_Load(self._ftype, efn(filename), self._flags)
|
| 835 |
+
bitmap = ctypes.c_void_p(bitmap)
|
| 836 |
+
|
| 837 |
+
# Check and store
|
| 838 |
+
if not bitmap: # pragma: no cover
|
| 839 |
+
raise ValueError(
|
| 840 |
+
'Could not load bitmap "%s": %s'
|
| 841 |
+
% (self._filename, self._fi._get_error_message())
|
| 842 |
+
)
|
| 843 |
+
self._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap))
|
| 844 |
+
|
| 845 |
+
# def load_from_bytes(self, bb):
|
| 846 |
+
# with self._fi as lib:
|
| 847 |
+
# # Create bitmap
|
| 848 |
+
# fimemory = lib.FreeImage_OpenMemory(
|
| 849 |
+
# ctypes.c_char_p(bb), len(bb))
|
| 850 |
+
# bitmap = lib.FreeImage_LoadFromMemory(
|
| 851 |
+
# self._ftype, ctypes.c_void_p(fimemory), self._flags)
|
| 852 |
+
# bitmap = ctypes.c_void_p(bitmap)
|
| 853 |
+
# lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory))
|
| 854 |
+
#
|
| 855 |
+
# # Check
|
| 856 |
+
# if not bitmap:
|
| 857 |
+
# raise ValueError('Could not load bitmap "%s": %s'
|
| 858 |
+
# % (self._filename, self._fi._get_error_message()))
|
| 859 |
+
# else:
|
| 860 |
+
# self._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap))
|
| 861 |
+
|
| 862 |
+
def save_to_filename(self, filename=None):
|
| 863 |
+
if filename is None:
|
| 864 |
+
filename = self._filename
|
| 865 |
+
|
| 866 |
+
ftype = self._ftype
|
| 867 |
+
bitmap = self._bitmap
|
| 868 |
+
fi_type = self._fi_type # element type
|
| 869 |
+
|
| 870 |
+
with self._fi as lib:
|
| 871 |
+
# Check if can write
|
| 872 |
+
if fi_type == FI_TYPES.FIT_BITMAP:
|
| 873 |
+
can_write = lib.FreeImage_FIFSupportsExportBPP(
|
| 874 |
+
ftype, lib.FreeImage_GetBPP(bitmap)
|
| 875 |
+
)
|
| 876 |
+
else:
|
| 877 |
+
can_write = lib.FreeImage_FIFSupportsExportType(ftype, fi_type)
|
| 878 |
+
if not can_write:
|
| 879 |
+
raise TypeError("Cannot save image of this format to this file type")
|
| 880 |
+
|
| 881 |
+
# Save to file
|
| 882 |
+
res = lib.FreeImage_Save(ftype, bitmap, efn(filename), self._flags)
|
| 883 |
+
# Check
|
| 884 |
+
if res is None: # pragma: no cover, we do so many checks, this is rare
|
| 885 |
+
raise RuntimeError(
|
| 886 |
+
f"Could not save file `{self._filename}`: {self._fi._get_error_message()}"
|
| 887 |
+
)
|
| 888 |
+
|
| 889 |
+
# def save_to_bytes(self):
|
| 890 |
+
# ftype = self._ftype
|
| 891 |
+
# bitmap = self._bitmap
|
| 892 |
+
# fi_type = self._fi_type # element type
|
| 893 |
+
#
|
| 894 |
+
# with self._fi as lib:
|
| 895 |
+
# # Check if can write
|
| 896 |
+
# if fi_type == FI_TYPES.FIT_BITMAP:
|
| 897 |
+
# can_write = lib.FreeImage_FIFSupportsExportBPP(ftype,
|
| 898 |
+
# lib.FreeImage_GetBPP(bitmap))
|
| 899 |
+
# else:
|
| 900 |
+
# can_write = lib.FreeImage_FIFSupportsExportType(ftype, fi_type)
|
| 901 |
+
# if not can_write:
|
| 902 |
+
# raise TypeError('Cannot save image of this format '
|
| 903 |
+
# 'to this file type')
|
| 904 |
+
#
|
| 905 |
+
# # Extract the bytes
|
| 906 |
+
# fimemory = lib.FreeImage_OpenMemory(0, 0)
|
| 907 |
+
# res = lib.FreeImage_SaveToMemory(ftype, bitmap,
|
| 908 |
+
# ctypes.c_void_p(fimemory),
|
| 909 |
+
# self._flags)
|
| 910 |
+
# if res:
|
| 911 |
+
# N = lib.FreeImage_TellMemory(ctypes.c_void_p(fimemory))
|
| 912 |
+
# result = ctypes.create_string_buffer(N)
|
| 913 |
+
# lib.FreeImage_SeekMemory(ctypes.c_void_p(fimemory), 0)
|
| 914 |
+
# lib.FreeImage_ReadMemory(result, 1, N, ctypes.c_void_p(fimemory))
|
| 915 |
+
# result = result.raw
|
| 916 |
+
# lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory))
|
| 917 |
+
#
|
| 918 |
+
# # Check
|
| 919 |
+
# if not res:
|
| 920 |
+
# raise RuntimeError('Could not save file "%s": %s'
|
| 921 |
+
# % (self._filename, self._fi._get_error_message()))
|
| 922 |
+
#
|
| 923 |
+
# # Done
|
| 924 |
+
# return result
|
| 925 |
+
|
| 926 |
+
def get_image_data(self):
|
| 927 |
+
dtype, shape, bpp = self._get_type_and_shape()
|
| 928 |
+
array = self._wrap_bitmap_bits_in_array(shape, dtype, False)
|
| 929 |
+
with self._fi as lib:
|
| 930 |
+
isle = lib.FreeImage_IsLittleEndian()
|
| 931 |
+
|
| 932 |
+
# swizzle the color components and flip the scanlines to go from
|
| 933 |
+
# FreeImage's BGR[A] and upside-down internal memory format to
|
| 934 |
+
# something more normal
|
| 935 |
+
def n(arr):
|
| 936 |
+
# return arr[..., ::-1].T # Does not work on numpypy yet
|
| 937 |
+
if arr.ndim == 1: # pragma: no cover
|
| 938 |
+
return arr[::-1].T
|
| 939 |
+
elif arr.ndim == 2: # Always the case here ...
|
| 940 |
+
return arr[:, ::-1].T
|
| 941 |
+
elif arr.ndim == 3: # pragma: no cover
|
| 942 |
+
return arr[:, :, ::-1].T
|
| 943 |
+
elif arr.ndim == 4: # pragma: no cover
|
| 944 |
+
return arr[:, :, :, ::-1].T
|
| 945 |
+
|
| 946 |
+
if len(shape) == 3 and isle and dtype.type == numpy.uint8:
|
| 947 |
+
b = n(array[0])
|
| 948 |
+
g = n(array[1])
|
| 949 |
+
r = n(array[2])
|
| 950 |
+
if shape[0] == 3:
|
| 951 |
+
return numpy.dstack((r, g, b))
|
| 952 |
+
elif shape[0] == 4:
|
| 953 |
+
a = n(array[3])
|
| 954 |
+
return numpy.dstack((r, g, b, a))
|
| 955 |
+
else: # pragma: no cover - we check this earlier
|
| 956 |
+
raise ValueError("Cannot handle images of shape %s" % shape)
|
| 957 |
+
|
| 958 |
+
# We need to copy because array does *not* own its memory
|
| 959 |
+
# after bitmap is freed.
|
| 960 |
+
a = n(array).copy()
|
| 961 |
+
return a
|
| 962 |
+
|
| 963 |
+
def set_image_data(self, array):
|
| 964 |
+
# Prepare array
|
| 965 |
+
assert isinstance(array, numpy.ndarray)
|
| 966 |
+
shape = array.shape
|
| 967 |
+
dtype = array.dtype
|
| 968 |
+
with self._fi as lib:
|
| 969 |
+
isle = lib.FreeImage_IsLittleEndian()
|
| 970 |
+
|
| 971 |
+
# Calculate shape and channels
|
| 972 |
+
r, c = shape[:2]
|
| 973 |
+
if len(shape) == 2:
|
| 974 |
+
n_channels = 1
|
| 975 |
+
w_shape = (c, r)
|
| 976 |
+
elif len(shape) == 3:
|
| 977 |
+
n_channels = shape[2]
|
| 978 |
+
w_shape = (n_channels, c, r)
|
| 979 |
+
else:
|
| 980 |
+
n_channels = shape[0]
|
| 981 |
+
|
| 982 |
+
def n(arr): # normalise to freeimage's in-memory format
|
| 983 |
+
return arr[::-1].T
|
| 984 |
+
|
| 985 |
+
wrapped_array = self._wrap_bitmap_bits_in_array(w_shape, dtype, True)
|
| 986 |
+
# swizzle the color components and flip the scanlines to go to
|
| 987 |
+
# FreeImage's BGR[A] and upside-down internal memory format
|
| 988 |
+
# The BGR[A] order is only used for 8bits per channel images
|
| 989 |
+
# on little endian machines. For everything else RGB[A] is
|
| 990 |
+
# used.
|
| 991 |
+
if len(shape) == 3 and isle and dtype.type == numpy.uint8:
|
| 992 |
+
R = array[:, :, 0]
|
| 993 |
+
G = array[:, :, 1]
|
| 994 |
+
B = array[:, :, 2]
|
| 995 |
+
wrapped_array[0] = n(B)
|
| 996 |
+
wrapped_array[1] = n(G)
|
| 997 |
+
wrapped_array[2] = n(R)
|
| 998 |
+
if shape[2] == 4:
|
| 999 |
+
A = array[:, :, 3]
|
| 1000 |
+
wrapped_array[3] = n(A)
|
| 1001 |
+
else:
|
| 1002 |
+
wrapped_array[:] = n(array)
|
| 1003 |
+
if self._need_finish:
|
| 1004 |
+
self._finish_wrapped_array(wrapped_array)
|
| 1005 |
+
|
| 1006 |
+
if len(shape) == 2 and dtype.type == numpy.uint8:
|
| 1007 |
+
with self._fi as lib:
|
| 1008 |
+
palette = lib.FreeImage_GetPalette(self._bitmap)
|
| 1009 |
+
palette = ctypes.c_void_p(palette)
|
| 1010 |
+
if not palette:
|
| 1011 |
+
raise RuntimeError("Could not get image palette")
|
| 1012 |
+
try:
|
| 1013 |
+
palette_data = GREY_PALETTE.ctypes.data
|
| 1014 |
+
except Exception: # pragma: no cover - IS_PYPY
|
| 1015 |
+
palette_data = GREY_PALETTE.__array_interface__["data"][0]
|
| 1016 |
+
ctypes.memmove(palette, palette_data, 1024)
|
| 1017 |
+
|
| 1018 |
+
def _wrap_bitmap_bits_in_array(self, shape, dtype, save):
|
| 1019 |
+
"""Return an ndarray view on the data in a FreeImage bitmap. Only
|
| 1020 |
+
valid for as long as the bitmap is loaded (if single page) / locked
|
| 1021 |
+
in memory (if multipage). This is used in loading data, but
|
| 1022 |
+
also during saving, to prepare a strided numpy array buffer.
|
| 1023 |
+
|
| 1024 |
+
"""
|
| 1025 |
+
# Get bitmap info
|
| 1026 |
+
with self._fi as lib:
|
| 1027 |
+
pitch = lib.FreeImage_GetPitch(self._bitmap)
|
| 1028 |
+
bits = lib.FreeImage_GetBits(self._bitmap)
|
| 1029 |
+
|
| 1030 |
+
# Get more info
|
| 1031 |
+
height = shape[-1]
|
| 1032 |
+
byte_size = height * pitch
|
| 1033 |
+
itemsize = dtype.itemsize
|
| 1034 |
+
|
| 1035 |
+
# Get strides
|
| 1036 |
+
if len(shape) == 3:
|
| 1037 |
+
strides = (itemsize, shape[0] * itemsize, pitch)
|
| 1038 |
+
else:
|
| 1039 |
+
strides = (itemsize, pitch)
|
| 1040 |
+
|
| 1041 |
+
# Create numpy array and return
|
| 1042 |
+
data = (ctypes.c_char * byte_size).from_address(bits)
|
| 1043 |
+
try:
|
| 1044 |
+
self._need_finish = False
|
| 1045 |
+
if TEST_NUMPY_NO_STRIDES:
|
| 1046 |
+
raise NotImplementedError()
|
| 1047 |
+
return numpy.ndarray(shape, dtype=dtype, buffer=data, strides=strides)
|
| 1048 |
+
except NotImplementedError:
|
| 1049 |
+
# IS_PYPY - not very efficient. We create a C-contiguous
|
| 1050 |
+
# numpy array (because pypy does not support Fortran-order)
|
| 1051 |
+
# and shape it such that the rest of the code can remain.
|
| 1052 |
+
if save:
|
| 1053 |
+
self._need_finish = True # Flag to use _finish_wrapped_array
|
| 1054 |
+
return numpy.zeros(shape, dtype=dtype)
|
| 1055 |
+
else:
|
| 1056 |
+
bb = bytes(bytearray(data))
|
| 1057 |
+
array = numpy.frombuffer(bb, dtype=dtype).copy()
|
| 1058 |
+
# Deal with strides
|
| 1059 |
+
if len(shape) == 3:
|
| 1060 |
+
array.shape = shape[2], strides[-1] // shape[0], shape[0]
|
| 1061 |
+
array2 = array[: shape[2], : shape[1], : shape[0]]
|
| 1062 |
+
array = numpy.zeros(shape, dtype=array.dtype)
|
| 1063 |
+
for i in range(shape[0]):
|
| 1064 |
+
array[i] = array2[:, :, i].T
|
| 1065 |
+
else:
|
| 1066 |
+
array.shape = shape[1], strides[-1]
|
| 1067 |
+
array = array[: shape[1], : shape[0]].T
|
| 1068 |
+
return array
|
| 1069 |
+
|
| 1070 |
+
def _finish_wrapped_array(self, array): # IS_PYPY
|
| 1071 |
+
"""Hardcore way to inject numpy array in bitmap."""
|
| 1072 |
+
# Get bitmap info
|
| 1073 |
+
with self._fi as lib:
|
| 1074 |
+
pitch = lib.FreeImage_GetPitch(self._bitmap)
|
| 1075 |
+
bits = lib.FreeImage_GetBits(self._bitmap)
|
| 1076 |
+
bpp = lib.FreeImage_GetBPP(self._bitmap)
|
| 1077 |
+
# Get channels and realwidth
|
| 1078 |
+
nchannels = bpp // 8 // array.itemsize
|
| 1079 |
+
realwidth = pitch // nchannels
|
| 1080 |
+
# Apply padding for pitch if necessary
|
| 1081 |
+
extra = realwidth - array.shape[-2]
|
| 1082 |
+
assert 0 <= extra < 10
|
| 1083 |
+
# Make sort of Fortran, also take padding (i.e. pitch) into account
|
| 1084 |
+
newshape = array.shape[-1], realwidth, nchannels
|
| 1085 |
+
array2 = numpy.zeros(newshape, array.dtype)
|
| 1086 |
+
if nchannels == 1:
|
| 1087 |
+
array2[:, : array.shape[-2], 0] = array.T
|
| 1088 |
+
else:
|
| 1089 |
+
for i in range(nchannels):
|
| 1090 |
+
array2[:, : array.shape[-2], i] = array[i, :, :].T
|
| 1091 |
+
# copy data
|
| 1092 |
+
data_ptr = array2.__array_interface__["data"][0]
|
| 1093 |
+
ctypes.memmove(bits, data_ptr, array2.nbytes)
|
| 1094 |
+
del array2
|
| 1095 |
+
|
| 1096 |
+
def _get_type_and_shape(self):
|
| 1097 |
+
bitmap = self._bitmap
|
| 1098 |
+
|
| 1099 |
+
# Get info on bitmap
|
| 1100 |
+
with self._fi as lib:
|
| 1101 |
+
w = lib.FreeImage_GetWidth(bitmap)
|
| 1102 |
+
h = lib.FreeImage_GetHeight(bitmap)
|
| 1103 |
+
self._fi_type = fi_type = lib.FreeImage_GetImageType(bitmap)
|
| 1104 |
+
if not fi_type:
|
| 1105 |
+
raise ValueError("Unknown image pixel type")
|
| 1106 |
+
|
| 1107 |
+
# Determine required props for numpy array
|
| 1108 |
+
bpp = None
|
| 1109 |
+
dtype = FI_TYPES.dtypes[fi_type]
|
| 1110 |
+
|
| 1111 |
+
if fi_type == FI_TYPES.FIT_BITMAP:
|
| 1112 |
+
with self._fi as lib:
|
| 1113 |
+
bpp = lib.FreeImage_GetBPP(bitmap)
|
| 1114 |
+
has_pallette = lib.FreeImage_GetColorsUsed(bitmap)
|
| 1115 |
+
if has_pallette:
|
| 1116 |
+
# Examine the palette. If it is grayscale, we return as such
|
| 1117 |
+
if has_pallette == 256:
|
| 1118 |
+
palette = lib.FreeImage_GetPalette(bitmap)
|
| 1119 |
+
palette = ctypes.c_void_p(palette)
|
| 1120 |
+
p = (ctypes.c_uint8 * (256 * 4)).from_address(palette.value)
|
| 1121 |
+
p = numpy.frombuffer(p, numpy.uint32).copy()
|
| 1122 |
+
if (GREY_PALETTE == p).all():
|
| 1123 |
+
extra_dims = []
|
| 1124 |
+
return numpy.dtype(dtype), extra_dims + [w, h], bpp
|
| 1125 |
+
# Convert bitmap and call this method again
|
| 1126 |
+
newbitmap = lib.FreeImage_ConvertTo32Bits(bitmap)
|
| 1127 |
+
newbitmap = ctypes.c_void_p(newbitmap)
|
| 1128 |
+
self._set_bitmap(newbitmap)
|
| 1129 |
+
return self._get_type_and_shape()
|
| 1130 |
+
elif bpp == 8:
|
| 1131 |
+
extra_dims = []
|
| 1132 |
+
elif bpp == 24:
|
| 1133 |
+
extra_dims = [3]
|
| 1134 |
+
elif bpp == 32:
|
| 1135 |
+
extra_dims = [4]
|
| 1136 |
+
else: # pragma: no cover
|
| 1137 |
+
# raise ValueError('Cannot convert %d BPP bitmap' % bpp)
|
| 1138 |
+
# Convert bitmap and call this method again
|
| 1139 |
+
newbitmap = lib.FreeImage_ConvertTo32Bits(bitmap)
|
| 1140 |
+
newbitmap = ctypes.c_void_p(newbitmap)
|
| 1141 |
+
self._set_bitmap(newbitmap)
|
| 1142 |
+
return self._get_type_and_shape()
|
| 1143 |
+
else:
|
| 1144 |
+
extra_dims = FI_TYPES.extra_dims[fi_type]
|
| 1145 |
+
|
| 1146 |
+
# Return dtype and shape
|
| 1147 |
+
return numpy.dtype(dtype), extra_dims + [w, h], bpp
|
| 1148 |
+
|
| 1149 |
+
def quantize(self, quantizer=0, palettesize=256):
|
| 1150 |
+
"""Quantize the bitmap to make it 8-bit (paletted). Returns a new
|
| 1151 |
+
FIBitmap object.
|
| 1152 |
+
Only for 24 bit images.
|
| 1153 |
+
"""
|
| 1154 |
+
with self._fi as lib:
|
| 1155 |
+
# New bitmap
|
| 1156 |
+
bitmap = lib.FreeImage_ColorQuantizeEx(
|
| 1157 |
+
self._bitmap, quantizer, palettesize, 0, None
|
| 1158 |
+
)
|
| 1159 |
+
bitmap = ctypes.c_void_p(bitmap)
|
| 1160 |
+
|
| 1161 |
+
# Check and return
|
| 1162 |
+
if not bitmap:
|
| 1163 |
+
raise ValueError(
|
| 1164 |
+
'Could not quantize bitmap "%s": %s'
|
| 1165 |
+
% (self._filename, self._fi._get_error_message())
|
| 1166 |
+
)
|
| 1167 |
+
|
| 1168 |
+
new = FIBitmap(self._fi, self._filename, self._ftype, self._flags)
|
| 1169 |
+
new._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap))
|
| 1170 |
+
new._fi_type = self._fi_type
|
| 1171 |
+
return new
|
| 1172 |
+
|
| 1173 |
+
|
| 1174 |
+
# def convert_to_32bit(self):
|
| 1175 |
+
# """ Convert to 32bit image.
|
| 1176 |
+
# """
|
| 1177 |
+
# with self._fi as lib:
|
| 1178 |
+
# # New bitmap
|
| 1179 |
+
# bitmap = lib.FreeImage_ConvertTo32Bits(self._bitmap)
|
| 1180 |
+
# bitmap = ctypes.c_void_p(bitmap)
|
| 1181 |
+
#
|
| 1182 |
+
# # Check and return
|
| 1183 |
+
# if not bitmap:
|
| 1184 |
+
# raise ValueError('Could not convert bitmap to 32bit "%s": %s' %
|
| 1185 |
+
# (self._filename,
|
| 1186 |
+
# self._fi._get_error_message()))
|
| 1187 |
+
# else:
|
| 1188 |
+
# new = FIBitmap(self._fi, self._filename, self._ftype,
|
| 1189 |
+
# self._flags)
|
| 1190 |
+
# new._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap))
|
| 1191 |
+
# new._fi_type = self._fi_type
|
| 1192 |
+
# return new
|
| 1193 |
+
|
| 1194 |
+
|
| 1195 |
+
class FIMultipageBitmap(FIBaseBitmap):
    """Wrapper for the multipage FI bitmap object.

    Wraps a FreeImage multi-bitmap handle (animated GIF, multi-page TIFF,
    ...). Individual pages are obtained as FIBitmap objects via get_page().
    """

    def load_from_filename(self, filename=None):
        """Open an existing multi-page file read-only.

        Registers FreeImage_CloseMultiBitmap as the handle's close function.
        """
        if filename is None:  # pragma: no cover
            filename = self._filename

        # Prepare: open existing file, read-only, cache pages on disk
        create_new = False
        read_only = True
        keep_cache_in_memory = False

        # Try opening
        with self._fi as lib:
            # Create bitmap
            multibitmap = lib.FreeImage_OpenMultiBitmap(
                self._ftype,
                efn(filename),
                create_new,
                read_only,
                keep_cache_in_memory,
                self._flags,
            )
            multibitmap = ctypes.c_void_p(multibitmap)

            # Check
            if not multibitmap:  # pragma: no cover
                err = self._fi._get_error_message()
                raise ValueError(
                    'Could not open file "%s" as multi-image: %s'
                    % (self._filename, err)
                )
            self._set_bitmap(multibitmap, (lib.FreeImage_CloseMultiBitmap, multibitmap))

    # def load_from_bytes(self, bb):
    #     with self._fi as lib:
    #         # Create bitmap
    #         fimemory = lib.FreeImage_OpenMemory(
    #             ctypes.c_char_p(bb), len(bb))
    #         multibitmap = lib.FreeImage_LoadMultiBitmapFromMemory(
    #             self._ftype, ctypes.c_void_p(fimemory), self._flags)
    #         multibitmap = ctypes.c_void_p(multibitmap)
    #         # lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory))
    #         self._mem = fimemory
    #         self._bytes = bb
    #         # Check
    #         if not multibitmap:
    #             raise ValueError('Could not load multibitmap "%s": %s'
    #                              % (self._filename, self._fi._get_error_message()))
    #         else:
    #             self._set_bitmap(multibitmap,
    #                              (lib.FreeImage_CloseMultiBitmap, multibitmap))

    def save_to_filename(self, filename=None):
        """Create a new multi-page file for writing."""
        if filename is None:  # pragma: no cover
            filename = self._filename

        # Prepare: create a new writable file
        create_new = True
        read_only = False
        keep_cache_in_memory = False

        # Open the file
        # todo: Set flags at close func
        with self._fi as lib:
            multibitmap = lib.FreeImage_OpenMultiBitmap(
                self._ftype,
                efn(filename),
                create_new,
                read_only,
                keep_cache_in_memory,
                0,
            )
            multibitmap = ctypes.c_void_p(multibitmap)

            # Check
            if not multibitmap:  # pragma: no cover
                msg = 'Could not open file "%s" for writing multi-image: %s' % (
                    self._filename,
                    self._fi._get_error_message(),
                )
                raise ValueError(msg)
            self._set_bitmap(multibitmap, (lib.FreeImage_CloseMultiBitmap, multibitmap))

    def __len__(self):
        # Number of pages (frames) in the multi-bitmap.
        with self._fi as lib:
            return lib.FreeImage_GetPageCount(self._bitmap)

    def get_page(self, index):
        """Return the sub-bitmap for the given page index.
        Please close the returned bitmap when done.
        """
        with self._fi as lib:
            # Create low-level bitmap in freeimage
            bitmap = lib.FreeImage_LockPage(self._bitmap, index)
            bitmap = ctypes.c_void_p(bitmap)
            if not bitmap:  # pragma: no cover
                raise ValueError(
                    "Could not open sub-image %i in %r: %s"
                    % (index, self._filename, self._fi._get_error_message())
                )

            # Get bitmap object to wrap this bitmap; closing it unlocks the
            # page (False = discard changes) rather than unloading.
            bm = FIBitmap(self._fi, self._filename, self._ftype, self._flags)
            bm._set_bitmap(
                bitmap, (lib.FreeImage_UnlockPage, self._bitmap, bitmap, False)
            )
            return bm

    def append_bitmap(self, bitmap):
        """Add a sub-bitmap to the multi-page bitmap."""
        with self._fi as lib:
            # no return value
            lib.FreeImage_AppendPage(self._bitmap, bitmap._bitmap)
|
| 1309 |
+
|
| 1310 |
+
|
| 1311 |
+
# Create instance
# Module-level singleton wrapping the FreeImage shared library; the rest of
# the plugin imports and uses this `fi` object instead of constructing
# Freeimage itself.
fi = Freeimage()
|
parrot/lib/python3.10/site-packages/imageio/plugins/_swf.py
ADDED
|
@@ -0,0 +1,897 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# imageio is distributed under the terms of the (new) BSD License.
|
| 3 |
+
# This code was taken from https://github.com/almarklein/visvis/blob/master/vvmovie/images2swf.py
|
| 4 |
+
|
| 5 |
+
# styletest: ignore E261
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
Provides a function (write_swf) to store a series of numpy arrays in an
|
| 9 |
+
SWF movie, that can be played on a wide range of OS's.
|
| 10 |
+
|
| 11 |
+
In desperation of wanting to share animated images, and then lacking a good
|
| 12 |
+
writer for animated gif or .avi, I decided to look into SWF. This format
|
| 13 |
+
is very well documented.
|
| 14 |
+
|
| 15 |
+
This is a pure python module to create an SWF file that shows a series
|
| 16 |
+
of images. The images are stored using the DEFLATE algorithm (same as
|
| 17 |
+
PNG and ZIP and which is included in the standard Python distribution).
|
| 18 |
+
As this compression algorithm is much more effective than that used in
|
| 19 |
+
GIF images, we obtain better quality (24 bit colors + alpha channel)
|
| 20 |
+
while still producing smaller files (a test showed ~75%). Although
|
| 21 |
+
SWF also allows for JPEG compression, doing so would probably require
|
| 22 |
+
a third party library for the JPEG encoding/decoding, we could
|
| 23 |
+
perhaps do this via Pillow or freeimage.
|
| 24 |
+
|
| 25 |
+
sources and tools:
|
| 26 |
+
|
| 27 |
+
- SWF on wikipedia
|
| 28 |
+
- Adobe's "SWF File Format Specification" version 10
|
| 29 |
+
(http://www.adobe.com/devnet/swf/pdf/swf_file_format_spec_v10.pdf)
|
| 30 |
+
- swftools (swfdump in specific) for debugging
|
| 31 |
+
- iwisoft swf2avi can be used to convert swf to avi/mpg/flv with really
|
| 32 |
+
good quality, while file size is reduced with factors 20-100.
|
| 33 |
+
A good program in my opinion. The free version has the limitation
|
| 34 |
+
of a watermark in the upper left corner.
|
| 35 |
+
|
| 36 |
+
"""
|
| 37 |
+
|
| 38 |
+
import os
|
| 39 |
+
import zlib
|
| 40 |
+
import time # noqa
|
| 41 |
+
import logging
|
| 42 |
+
|
| 43 |
+
import numpy as np
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
logger = logging.getLogger(__name__)
|
| 47 |
+
|
| 48 |
+
# todo: use Pillow to support reading JPEG images from SWF?
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
# Base functions and classes
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class BitArray:
    """Dynamic array of bits that automatically resizes
    with factors of two.
    Append bits using .append() or +=
    You can reverse bits using .reverse()
    """

    def __init__(self, initvalue=None):
        # Bits are stored as the ASCII codes of '0'/'1' in a uint8 buffer
        # that doubles in size when full; self._len tracks the used portion.
        self.data = np.zeros((16,), dtype=np.uint8)
        self._len = 0
        if initvalue is not None:
            self.append(initvalue)

    def __len__(self):
        return self._len  # self.data.shape[0]

    def __repr__(self):
        # The bit string itself, e.g. "10110".
        return self.data[: self._len].tobytes().decode("ascii")

    def _checkSize(self):
        # check length... grow (double) if necessary
        arraylen = self.data.shape[0]
        if self._len >= arraylen:
            tmp = np.zeros((arraylen * 2,), dtype=np.uint8)
            tmp[: self._len] = self.data[: self._len]
            self.data = tmp

    def __add__(self, value):
        # NOTE: mutates in place and returns self, so `bb += x` and
        # `bb = bb + x` behave identically (intentional for this module).
        self.append(value)
        return self

    def append(self, bits):
        """Append bits given as a '0'/'1' string, an int, or a BitArray."""
        # check input
        if isinstance(bits, BitArray):
            bits = str(bits)
        if isinstance(bits, int):  # pragma: no cover - we dont use it
            bits = str(bits)
        if not isinstance(bits, str):  # pragma: no cover
            raise ValueError("Append bits as strings or integers!")

        # add bits
        for bit in bits:
            self.data[self._len] = ord(bit)
            self._len += 1
            self._checkSize()

    def reverse(self):
        """In-place reverse."""
        tmp = self.data[: self._len].copy()
        self.data[: self._len] = tmp[::-1]

    def tobytes(self):
        """Convert to bytes. If necessary,
        zeros are padded to the end (right side).
        """
        bits = str(self)

        # Number of bytes needed (ceil division), then right-pad with zeros
        # to a whole number of bytes.
        nbytes = (len(bits) + 7) // 8
        bits = bits.ljust(nbytes * 8, "0")

        # Pack 8 bits per byte in one pass; constructing bytes() from a
        # generator of ints avoids the quadratic cost of repeated
        # bytes-concatenation of the original implementation.
        return bytes(int(bits[i * 8 : (i + 1) * 8], 2) for i in range(nbytes))
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def int2uint32(i):
    """Pack *i* as a 4-byte little-endian unsigned integer."""
    value = int(i)
    return value.to_bytes(4, byteorder="little")
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def int2uint16(i):
    """Pack *i* as a 2-byte little-endian unsigned integer."""
    value = int(i)
    return value.to_bytes(2, byteorder="little")
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def int2uint8(i):
    """Pack *i* as a single unsigned byte."""
    value = int(i)
    return value.to_bytes(1, byteorder="little")
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def int2bits(i, n=None):
    """convert int to a string of bits (0's and 1's in a string),
    pad to n elements. Convert back using int(ss,2)."""
    remaining = i

    # Emit least-significant bit first, then reverse to MSB-first order.
    # Note that 0 yields an empty bit string (as in the original scheme).
    bb = BitArray()
    while remaining > 0:
        bb += str(remaining % 2)
        remaining = remaining >> 1
    bb.reverse()

    # Left-pad with zeros up to n bits if requested.
    if n is not None:
        if len(bb) > n:  # pragma: no cover
            raise ValueError("int2bits fail: len larger than padlength.")
        bb = str(bb).rjust(n, "0")

    # done
    return BitArray(bb)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def bits2int(bb, n=8):
    """Interpret the bytes *bb* as one bit pattern (last byte's bits
    first, i.e. little-endian byte order) and return the integer value
    of its first *n* bits.
    """
    # Each byte contributes 8 MSB-first bits; bytes are assembled in
    # reverse order, exactly like the original prepend loop.
    pattern = "".join(format(byte, "08b") for byte in reversed(bb))

    # Make decimal from the leading n bits
    return int(pattern[:n], 2)
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def get_type_and_len(bb):
    """bb should be 6 bytes at least
    Return (type, length, length_of_full_tag)
    """
    # The first two bytes form a little-endian uint16: the upper 10 bits
    # are the tag type, the lower 6 bits the (short) record length.
    # (Replaces the original string-of-bits round trip, and avoids the
    # local name `type` shadowing the builtin.)
    head = int.from_bytes(bb[:2], "little")
    tag_type = head >> 6
    L = head & 0x3F
    L2 = L + 2

    # Long tag header? A short length of 63 ('111111') signals that the
    # real length follows as a little-endian uint32 in bytes 2..5.
    if L == 63:
        L = int.from_bytes(bb[2:6], "little")
        L2 = L + 6

    # Done
    return tag_type, L, L2
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
def signedint2bits(i, n=None):
    """convert signed int to a string of bits (0's and 1's in a string),
    pad to n elements. Negative numbers are stored in 2's complement bit
    patterns, thus positive numbers always start with a 0.
    """

    # negative number?
    ii = i
    if i < 0:
        # A negative number, -n, is represented as the bitwise opposite of
        ii = abs(ii) - 1  # the positive-zero number n-1.

    # make bits (LSB first, then reverse so the MSB is in front)
    bb = BitArray()
    while ii > 0:
        bb += str(ii % 2)
        ii = ii >> 1
    bb.reverse()

    # justify
    bb = "0" + str(bb)  # always need the sign bit in front
    if n is not None:
        if len(bb) > n:  # pragma: no cover
            raise ValueError("signedint2bits fail: len larger than padlength.")
        bb = bb.rjust(n, "0")

    # was it negative? (then opposite bits)
    if i < 0:
        # Flip every bit via a temporary 'x' marker; combined with the
        # abs(i)-1 above this yields the 2's complement pattern.
        bb = bb.replace("0", "x").replace("1", "0").replace("x", "1")

    # done
    return BitArray(bb)
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def twits2bits(arr):
    """Given a few (signed) numbers, store them
    as compactly as possible in the way specified by the swf format.
    The numbers are multiplied by 20, assuming they
    are twits.
    Can be used to make the RECT record.
    """

    # first determine length using non justified bit strings
    maxlen = 1
    for i in arr:
        tmp = len(signedint2bits(i * 20))
        if tmp > maxlen:
            maxlen = tmp

    # build array: a 5-bit field width, then every value in that many bits
    bits = int2bits(maxlen, 5)
    for i in arr:
        bits += signedint2bits(i * 20, maxlen)

    return bits
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def floats2bits(arr):
    """Given a few (signed) numbers, convert them to bits,
    stored as FB (float bit values). We always use 16.16.
    Negative numbers are not (yet) possible, because I don't
    know how they're implemented (ambiguity).
    """
    # The per-value bit count is written in 5 bits, so 31 is the maximum.
    bits = int2bits(31, 5)  # 32 does not fit in 5 bits!
    for i in arr:
        if i < 0:  # pragma: no cover
            raise ValueError("Dit not implement negative floats!")
        i1 = int(i)  # integer part
        i2 = i - i1  # fractional part, scaled to 16 bits below
        bits += int2bits(i1, 15)
        bits += int2bits(i2 * 2**16, 16)
    return bits
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
# Base Tag
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
class Tag:
    """Base class for SWF tags. Subclasses fill ``self.bytes`` in
    ``process_tag``; ``get_tag`` prepends the SWF record header."""

    def __init__(self):
        self.bytes = bytes()  # tag payload, produced by process_tag()
        self.tagtype = -1  # SWF tag type code; subclasses must set this

    def process_tag(self):
        """Implement this to create the tag."""
        raise NotImplementedError()

    def get_tag(self):
        """Calls processTag and attaches the header."""
        self.process_tag()

        # tag to binary
        bits = int2bits(self.tagtype, 10)

        # complete header uint16 thing; length field of all ones selects
        # the long-header form, so an explicit 32-bit length follows.
        bits += "1" * 6  # = 63 = 0x3f
        # make uint16
        bb = int2uint16(int(str(bits), 2))

        # now add 32bit length descriptor
        bb += int2uint32(len(self.bytes))

        # done, attach and return
        bb += self.bytes
        return bb

    def make_rect_record(self, xmin, xmax, ymin, ymax):
        """Simply uses makeCompactArray to produce
        a RECT Record."""
        return twits2bits([xmin, xmax, ymin, ymax])

    def make_matrix_record(self, scale_xy=None, rot_xy=None, trans_xy=None):
        """Build a MATRIX record from optional scale/rotate/translate parts.

        NOTE(review): the empty-matrix branch returns a plain str while the
        normal path returns a BitArray; callers that call .tobytes() rely
        on never hitting the empty branch.
        """
        # empty matrix?
        if scale_xy is None and rot_xy is None and trans_xy is None:
            return "0" * 8

        # init
        bits = BitArray()

        # scale: flag bit, then two 16.16 values via floats2bits
        if scale_xy:
            bits += "1"
            bits += floats2bits([scale_xy[0], scale_xy[1]])
        else:
            bits += "0"

        # rotation: flag bit, then two 16.16 values via floats2bits
        if rot_xy:
            bits += "1"
            bits += floats2bits([rot_xy[0], rot_xy[1]])
        else:
            bits += "0"

        # translation (no flag here)
        if trans_xy:
            bits += twits2bits([trans_xy[0], trans_xy[1]])
        else:
            bits += twits2bits([0, 0])

        # done
        return bits
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
# Control tags
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
class ControlTag(Tag):
    """Base class for SWF control tags (tags that control playback
    rather than define assets)."""

    def __init__(self):
        Tag.__init__(self)
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
class FileAttributesTag(ControlTag):
    """The FileAttributes tag (type 69); its payload is four zero bytes."""

    def __init__(self):
        ControlTag.__init__(self)
        self.tagtype = 69

    def process_tag(self):
        # One flags byte plus three reserved bytes, all zero.
        self.bytes = bytes(1 + 3)
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
class ShowFrameTag(ControlTag):
    """The ShowFrame tag (type 1); it carries no payload."""

    def __init__(self):
        ControlTag.__init__(self)
        self.tagtype = 1

    def process_tag(self):
        # The tag header alone marks the frame; the body stays empty.
        self.bytes = b""
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
class SetBackgroundTag(ControlTag):
    """Set the color in 0-255, or 0-1 (if floats given)."""

    def __init__(self, *rgb):
        # Fix: unlike every other tag class here, the base initializer was
        # never called, leaving ``self.bytes`` unset until process_tag().
        ControlTag.__init__(self)
        self.tagtype = 9
        if len(rgb) == 1:
            rgb = rgb[0]  # a single (r, g, b) sequence was passed
        self.rgb = rgb

    def process_tag(self):
        """Serialize the background color as three uint8 values (RGB)."""
        bb = bytes()
        for i in range(3):
            clr = self.rgb[i]
            if isinstance(clr, float):  # pragma: no cover - not used
                clr = clr * 255  # scale 0-1 floats to the 0-255 range
            bb += int2uint8(clr)
        self.bytes = bb
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
class DoActionTag(Tag):
    """DoAction tag (type 12): a sequence of simple player actions."""

    def __init__(self, action="stop"):
        Tag.__init__(self)
        self.tagtype = 12
        self.actions = [action]

    def append(self, action):  # pragma: no cover - not used
        self.actions.append(action)

    def process_tag(self):
        # Translate each known action name to its one-byte opcode.
        parts = []
        for action in self.actions:
            action = action.lower()
            if action == "stop":
                parts.append(b"\x07")
            elif action == "play":  # pragma: no cover - not used
                parts.append(b"\x06")
            else:  # pragma: no cover
                logger.warning("unknown action: %s" % action)
        # The action list is terminated with a zero byte.
        self.bytes = b"".join(parts) + int2uint8(0)
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
# Definition tags
|
| 424 |
+
class DefinitionTag(Tag):
    """Base class for tags that define assets; every instance receives a
    unique id for the SWF dictionary via the shared class counter."""

    counter = 0  # to give automatically id's

    def __init__(self):
        Tag.__init__(self)
        DefinitionTag.counter += 1
        self.id = DefinitionTag.counter  # id in dictionary
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
class BitmapTag(DefinitionTag):
    """DefineBitsLossless2 tag: stores an image as zlib-compressed ARGB."""

    def __init__(self, im):
        DefinitionTag.__init__(self)
        self.tagtype = 36  # DefineBitsLossless2

        # convert image (note that format is ARGB)
        # even a grayscale image is stored in ARGB, nevertheless,
        # the fabilous deflate compression will make it that not much
        # more data is required for storing (25% or so, and less than 10%
        # when storing RGB as ARGB).

        if len(im.shape) == 3:
            if im.shape[2] in [3, 4]:
                # RGB(A) -> ARGB; alpha defaults to 255 (opaque)
                tmp = np.ones((im.shape[0], im.shape[1], 4), dtype=np.uint8) * 255
                for i in range(3):
                    tmp[:, :, i + 1] = im[:, :, i]
                if im.shape[2] == 4:
                    tmp[:, :, 0] = im[:, :, 3]  # swap channel where alpha is
            else:  # pragma: no cover
                raise ValueError("Invalid shape to be an image.")

        elif len(im.shape) == 2:
            # grayscale -> ARGB, gray value copied into each color channel
            tmp = np.ones((im.shape[0], im.shape[1], 4), dtype=np.uint8) * 255
            for i in range(3):
                tmp[:, :, i + 1] = im[:, :]
        else:  # pragma: no cover
            raise ValueError("Invalid shape to be an image.")

        # we changed the image to uint8 4 channels.
        # now compress!
        # NOTE(review): zlib.DEFLATED (== 8) is passed where zlib.compress
        # expects a compression *level*; 8 is a valid level so this works,
        # but it was probably intended as the method constant.
        self._data = zlib.compress(tmp.tobytes(), zlib.DEFLATED)
        self.imshape = im.shape

    def process_tag(self):
        # build tag
        bb = bytes()
        bb += int2uint16(self.id)  # CharacterID
        bb += int2uint8(5)  # BitmapFormat
        bb += int2uint16(self.imshape[1])  # BitmapWidth
        bb += int2uint16(self.imshape[0])  # BitmapHeight
        bb += self._data  # ZlibBitmapData

        self.bytes = bb
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
class PlaceObjectTag(ControlTag):
    """PlaceObject2 tag (type 26): place (or move) a character on the
    display list at a given depth and position."""

    def __init__(self, depth, idToPlace=None, xy=(0, 0), move=False):
        ControlTag.__init__(self)
        self.tagtype = 26
        self.depth = depth  # depth (level) in the display list
        self.idToPlace = idToPlace  # character id to place
        self.xy = xy  # translation, passed to the MATRIX record
        self.move = move  # True: modify the char already at this depth

    def process_tag(self):
        # retrieve stuff
        depth = self.depth
        xy = self.xy
        id = self.idToPlace

        # build PlaceObject2
        bb = bytes()
        if self.move:
            # flags 4:matrix + 2:character + 1:move
            bb += "\x07".encode("ascii")
        else:
            # (8 bit flags): 4:matrix, 2:character, 1:move
            bb += "\x06".encode("ascii")
        bb += int2uint16(depth)  # Depth
        bb += int2uint16(id)  # character id
        bb += self.make_matrix_record(trans_xy=xy).tobytes()  # MATRIX record
        self.bytes = bb
|
| 504 |
+
|
| 505 |
+
|
| 506 |
+
class ShapeTag(DefinitionTag):
    """DefineShape tag (type 2): a rectangle filled with a bitmap."""

    def __init__(self, bitmapId, xy, wh):
        DefinitionTag.__init__(self)
        self.tagtype = 2
        self.bitmapId = bitmapId  # id of the bitmap used as fill
        self.xy = xy  # position of the shape
        self.wh = wh  # width and height of the shape

    def process_tag(self):
        """Returns a defineshape tag. with a bitmap fill"""

        bb = bytes()
        bb += int2uint16(self.id)
        xy, wh = self.xy, self.wh
        tmp = self.make_rect_record(xy[0], wh[0], xy[1], wh[1])  # ShapeBounds
        bb += tmp.tobytes()

        # make SHAPEWITHSTYLE structure

        # first entry: FILLSTYLEARRAY with in it a single fill style
        bb += int2uint8(1)  # FillStyleCount
        bb += "\x41".encode("ascii")  # FillStyleType  (0x41 or 0x43 unsmoothed)
        bb += int2uint16(self.bitmapId)  # BitmapId
        # bb += '\x00' # BitmapMatrix (empty matrix with leftover bits filled)
        bb += self.make_matrix_record(scale_xy=(20, 20)).tobytes()

        # # first entry: FILLSTYLEARRAY with in it a single fill style
        # bb += int2uint8(1)  # FillStyleCount
        # bb += '\x00'   # solid fill
        # bb += '\x00\x00\xff' # color

        # second entry: LINESTYLEARRAY with a single line style
        bb += int2uint8(0)  # LineStyleCount
        # bb += int2uint16(0*20) # Width
        # bb += '\x00\xff\x00' # Color

        # third and fourth entry: NumFillBits and NumLineBits (4 bits each)
        # I each give them four bits, so 16 styles possible.
        bb += "\x44".encode("ascii")

        self.bytes = bb

        # last entries: SHAPERECORDs ... (individual shape records not aligned)
        # STYLECHANGERECORD
        bits = BitArray()
        bits += self.make_style_change_record(0, 1, moveTo=(self.wh[0], self.wh[1]))
        # STRAIGHTEDGERECORD 4x
        bits += self.make_straight_edge_record(-self.wh[0], 0)
        bits += self.make_straight_edge_record(0, -self.wh[1])
        bits += self.make_straight_edge_record(self.wh[0], 0)
        bits += self.make_straight_edge_record(0, self.wh[1])

        # ENDSHAPRECORD
        bits += self.make_end_shape_record()

        self.bytes += bits.tobytes()

        # done
        # self.bytes = bb

    def make_style_change_record(self, lineStyle=None, fillStyle=None, moveTo=None):
        # first 6 flags
        # Note that we use FillStyle1. If we don't flash (at least 8) does not
        # recognize the frames properly when importing to library.

        bits = BitArray()
        bits += "0"  # TypeFlag (not an edge record)
        bits += "0"  # StateNewStyles (only for DefineShape2 and Defineshape3)
        if lineStyle:
            bits += "1"  # StateLineStyle
        else:
            bits += "0"
        if fillStyle:
            bits += "1"  # StateFillStyle1
        else:
            bits += "0"
        bits += "0"  # StateFillStyle0
        if moveTo:
            bits += "1"  # StateMoveTo
        else:
            bits += "0"

        # give information
        # todo: nbits for fillStyle and lineStyle is hard coded.

        if moveTo:
            bits += twits2bits([moveTo[0], moveTo[1]])
        if fillStyle:
            bits += int2bits(fillStyle, 4)
        if lineStyle:
            bits += int2bits(lineStyle, 4)

        return bits

    def make_straight_edge_record(self, *dxdy):
        if len(dxdy) == 1:
            dxdy = dxdy[0]  # a single (dx, dy) pair was passed

        # determine required number of bits
        xbits = signedint2bits(dxdy[0] * 20)
        ybits = signedint2bits(dxdy[1] * 20)
        nbits = max([len(xbits), len(ybits)])

        bits = BitArray()
        bits += "11"  # TypeFlag and StraightFlag
        bits += int2bits(nbits - 2, 4)  # NumBits is stored minus 2
        bits += "1"  # GeneralLineFlag
        bits += signedint2bits(dxdy[0] * 20, nbits)
        bits += signedint2bits(dxdy[1] * 20, nbits)

        # note: I do not make use of vertical/horizontal only lines...

        return bits

    def make_end_shape_record(self):
        bits = BitArray()
        bits += "0"  # TypeFlag: no edge
        bits += "0" * 5  # EndOfShape
        return bits
|
| 625 |
+
|
| 626 |
+
|
| 627 |
+
def read_pixels(bb, i, tagType, L1):
    """With pf's seed after the recordheader, reads the pixeldata.

    Returns a numpy array, or None (implicitly) if the image is not in
    the lossless format 5; the caller checks ``is not None``.
    """

    # Get info
    charId = bb[i : i + 2]  # noqa
    i += 2
    format = ord(bb[i : i + 1])
    i += 1
    width = bits2int(bb[i : i + 2], 16)
    i += 2
    height = bits2int(bb[i : i + 2], 16)
    i += 2

    # If we can, get pixeldata and make numpy array
    if format != 5:
        logger.warning("Can only read 24bit or 32bit RGB(A) lossless images.")
    else:
        # Read byte data (L1 is the tag payload length; subtract the
        # info fields consumed above)
        offset = 2 + 1 + 2 + 2  # all the info bits
        bb2 = bb[i : i + (L1 - offset)]

        # Decompress and make numpy array
        data = zlib.decompress(bb2)
        a = np.frombuffer(data, dtype=np.uint8)

        # Set shape
        if tagType == 20:
            # DefineBitsLossless - RGB data
            try:
                a.shape = height, width, 3
            except Exception:
                # Byte align stuff might cause troubles
                logger.warning("Cannot read image due to byte alignment")
        if tagType == 36:
            # DefineBitsLossless2 - ARGB data
            a.shape = height, width, 4
            # Swap alpha channel to make RGBA
            b = a
            a = np.zeros_like(a)
            a[:, :, 0] = b[:, :, 1]
            a[:, :, 1] = b[:, :, 2]
            a[:, :, 2] = b[:, :, 3]
            a[:, :, 3] = b[:, :, 0]

        return a
|
| 672 |
+
|
| 673 |
+
|
| 674 |
+
# Last few functions
|
| 675 |
+
|
| 676 |
+
|
| 677 |
+
# These are the original public functions, we don't use them, but we
|
| 678 |
+
# keep it so that in principle this module can be used stand-alone.
|
| 679 |
+
|
| 680 |
+
|
| 681 |
+
def checkImages(images):  # pragma: no cover
    """checkImages(images)
    Check numpy images and correct intensity range etc.
    The same for all movie formats.
    """
    checked = []

    for im in images:
        if not isinstance(im, np.ndarray):
            raise ValueError("Invalid image type: " + str(type(im)))

        # Normalize dtype to uint8
        if im.dtype == np.uint8:
            checked.append(im)  # already fine
        elif im.dtype in [np.float32, np.float64]:
            theMax = im.max()
            if 128 < theMax < 300:
                pass  # assume the data is already on a 0:255 scale
            else:
                # clip to [0, 1] and scale up to 0-255
                im = im.copy()
                im[im < 0] = 0
                im[im > 1] = 1
                im *= 255
            checked.append(im.astype(np.uint8))
        else:
            im = im.astype(np.uint8)
            checked.append(im)

        # Validate dimensions: 2D gray or 3D with 3/4 channels
        if im.ndim == 2:
            pass
        elif im.ndim == 3:
            if im.shape[2] not in [3, 4]:
                raise ValueError("This array can not represent an image.")
        else:
            raise ValueError("This array can not represent an image.")

    return checked
|
| 720 |
+
|
| 721 |
+
|
| 722 |
+
def build_file(
    fp, taglist, nframes=1, framesize=(500, 500), fps=10, version=8
):  # pragma: no cover
    """Give the given file (as bytes) a header.

    Writes the SWF header, all tags in *taglist*, the end tag, and then
    patches the FileLength field at offset 4.
    """

    # compose header
    bb = bytes()
    bb += "F".encode("ascii")  # uncompressed
    bb += "WS".encode("ascii")  # signature bytes
    bb += int2uint8(version)  # version
    bb += "0000".encode("ascii")  # FileLength (leave open for now)
    bb += Tag().make_rect_record(0, framesize[0], 0, framesize[1]).tobytes()
    bb += int2uint8(0) + int2uint8(fps)  # FrameRate (integer part second)
    bb += int2uint16(nframes)
    fp.write(bb)

    # produce all tags
    for tag in taglist:
        fp.write(tag.get_tag())

    # finish with end tag
    fp.write("\x00\x00".encode("ascii"))

    # set size: overwrite the four placeholder bytes written at offset 4
    sze = fp.tell()
    fp.seek(4)
    fp.write(int2uint32(sze))
|
| 749 |
+
|
| 750 |
+
|
| 751 |
+
def write_swf(filename, images, duration=0.1, repeat=True):  # pragma: no cover
    """Write an swf-file from the specified images. If repeat is False,
    the movie is finished with a stop action. Duration may also
    be a list with durations for each frame (note that the duration
    for each frame is always an integer amount of the minimum duration.)

    Images should be a list consisting numpy arrays with values between
    0 and 255 for integer types, and between 0 and 1 for float types.

    """

    # Check images
    images2 = checkImages(images)
    if not images2:
        # Fix: an empty list previously crashed later with an unbound
        # local (``wh``); still a ValueError, but now with a clear message.
        raise ValueError("Image list is empty!")

    # Init
    taglist = [FileAttributesTag(), SetBackgroundTag(0, 0, 0)]

    # Check duration
    if hasattr(duration, "__len__"):
        if len(duration) == len(images2):
            duration = [d for d in duration]
        else:
            raise ValueError("len(duration) doesn't match amount of images.")
    else:
        duration = [duration for im in images2]

    # Build delays list: integer multiples of the shortest duration, >= 1
    minDuration = float(min(duration))
    delays = [round(d / minDuration) for d in duration]
    delays = [max(1, int(d)) for d in delays]

    # Get FPS
    fps = 1.0 / minDuration

    # Produce series of tags for each image: bitmap data, a shape that
    # shows it, a placement, and one ShowFrame per delay unit.
    nframes = 0
    for im in images2:
        bm = BitmapTag(im)
        wh = (im.shape[1], im.shape[0])
        sh = ShapeTag(bm.id, (0, 0), wh)
        po = PlaceObjectTag(1, sh.id, move=nframes > 0)
        taglist.extend([bm, sh, po])
        for i in range(delays[nframes]):
            taglist.append(ShowFrameTag())
        nframes += 1

    if not repeat:
        taglist.append(DoActionTag("stop"))

    # Build file (a with-statement replaces the original
    # try/except-that-only-reraised/finally construct)
    with open(filename, "wb") as fp:
        build_file(fp, taglist, nframes=nframes, framesize=wh, fps=fps)
|
| 813 |
+
|
| 814 |
+
|
| 815 |
+
def read_swf(filename):  # pragma: no cover
    """Read all images from an SWF (shockwave flash) file. Returns a list
    of numpy arrays.

    Limitation: only read the PNG encoded images (not the JPG encoded ones).
    """

    # Check whether it exists
    if not os.path.isfile(filename):
        raise IOError("File not found: " + str(filename))

    # Init images
    images = []

    # Open file and read all
    fp = open(filename, "rb")
    bb = fp.read()

    try:
        # Check opening tag
        tmp = bb[0:3].decode("ascii", "ignore")
        if tmp.upper() == "FWS":
            pass  # ok: uncompressed file
        elif tmp.upper() == "CWS":
            # Decompress movie: everything after the 8-byte header is zlib
            bb = bb[:8] + zlib.decompress(bb[8:])
        else:
            raise IOError("Not a valid SWF file: " + str(filename))

        # Set filepointer at first tag (skipping framesize RECT and two uin16's
        i = 8
        nbits = bits2int(bb[i : i + 1], 5)  # skip FrameSize
        nbits = 5 + nbits * 4  # total bit length of the RECT record
        Lrect = nbits / 8.0
        if Lrect % 1:
            Lrect += 1  # round up to a whole number of bytes
        Lrect = int(Lrect)
        i += Lrect + 4  # + FrameRate and FrameCount (2 bytes each)

        # Iterate over the tags
        counter = 0
        while True:
            counter += 1

            # Get tag header
            head = bb[i : i + 6]
            if not head:
                break  # Done (we missed end tag)

            # Determine type and length
            T, L1, L2 = get_type_and_len(head)
            if not L2:
                logger.warning("Invalid tag length, could not proceed")
                break
            # logger.warning(T, L2)

            # Read image if we can
            if T in [20, 36]:
                # DefineBitsLossless(2): lossless (zlib) bitmap data
                im = read_pixels(bb, i + 6, T, L1)
                if im is not None:
                    images.append(im)
            elif T in [6, 21, 35, 90]:
                logger.warning("Ignoring JPEG image: cannot read JPEG.")
            else:
                pass  # Not an image tag

            # Detect end tag
            if T == 0:
                break

            # Next tag!
            i += L2

    finally:
        fp.close()

    # Done
    return images
|
| 893 |
+
|
| 894 |
+
|
| 895 |
+
# Backward compatibility; same public names as when this was images2swf.
writeSwf = write_swf
readSwf = read_swf
|
parrot/lib/python3.10/site-packages/imageio/plugins/_tifffile.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
parrot/lib/python3.10/site-packages/imageio/plugins/bsdf.py
ADDED
|
@@ -0,0 +1,324 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# imageio is distributed under the terms of the (new) BSD License.
|
| 3 |
+
|
| 4 |
+
""" Read/Write BSDF files.
|
| 5 |
+
|
| 6 |
+
Backend Library: internal
|
| 7 |
+
|
| 8 |
+
The BSDF format enables reading and writing of image data in the
|
| 9 |
+
BSDF serialization format. This format allows storage of images, volumes,
|
| 10 |
+
and series thereof. Data can be of any numeric data type, and can
|
| 11 |
+
optionally be compressed. Each image/volume can have associated
|
| 12 |
+
meta data, which can consist of any data type supported by BSDF.
|
| 13 |
+
|
| 14 |
+
By default, image data is lazily loaded; the actual image data is
|
| 15 |
+
not read until it is requested. This allows storing multiple images
|
| 16 |
+
in a single file and still have fast access to individual images.
|
| 17 |
+
Alternatively, a series of images can be read in streaming mode, reading
|
| 18 |
+
images as they are read (e.g. from http).
|
| 19 |
+
|
| 20 |
+
BSDF is a simple generic binary format. It is easy to extend and there
|
| 21 |
+
are standard extension definitions for 2D and 3D image data.
|
| 22 |
+
Read more at http://bsdf.io.
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
Parameters
|
| 26 |
+
----------
|
| 27 |
+
random_access : bool
|
| 28 |
+
Whether individual images in the file can be read in random order.
|
| 29 |
+
Defaults to True for normal files, and to False when reading from HTTP.
|
| 30 |
+
If False, the file is read in "streaming mode", allowing reading
|
| 31 |
+
files as they are read, but without support for "rewinding".
|
| 32 |
+
Note that setting this to True when reading from HTTP, the whole file
|
| 33 |
+
is read upon opening it (since lazy loading is not possible over HTTP).
|
| 34 |
+
|
| 35 |
+
compression : int
|
| 36 |
+
Use ``0`` or "no" for no compression, ``1`` or "zlib" for Zlib
|
| 37 |
+
compression (same as zip files and PNG), and ``2`` or "bz2" for Bz2
|
| 38 |
+
compression (more compact but slower). Default 1 (zlib).
|
| 39 |
+
Note that some BSDF implementations may not support compression
|
| 40 |
+
(e.g. JavaScript).
|
| 41 |
+
|
| 42 |
+
"""
|
| 43 |
+
|
| 44 |
+
import numpy as np
|
| 45 |
+
|
| 46 |
+
from ..core import Format
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def get_bsdf_serializer(options):
    """Create a BSDF serializer with the image extensions registered.

    Returns the ``bsdf`` module and a ``BsdfSerializer`` configured with
    the ndarray/image2d/image3d extensions; ``options`` is forwarded to
    the serializer constructor.
    """
    from . import _bsdf as bsdf

    class NDArrayExtension(bsdf.Extension):
        """Copy of BSDF's NDArrayExtension but deal with lazy blobs."""

        name = "ndarray"
        cls = np.ndarray

        def encode(self, s, v):
            return dict(shape=v.shape, dtype=str(v.dtype), data=v.tobytes())

        def decode(self, s, v):
            return v  # return as dict, because of lazy blobs, decode in Image

    class ImageExtension(bsdf.Extension):
        """We implement two extensions that trigger on the Image classes."""

        def encode(self, s, v):
            return dict(array=v.array, meta=v.meta)

        def decode(self, s, v):
            return Image(v["array"], v["meta"])

    class Image2DExtension(ImageExtension):
        name = "image2d"
        cls = Image2D

    class Image3DExtension(ImageExtension):
        name = "image3d"
        cls = Image3D

    exts = [NDArrayExtension, Image2DExtension, Image3DExtension]
    serializer = bsdf.BsdfSerializer(exts, **options)

    return bsdf, serializer
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class Image:
    """Class in which we wrap the array and meta data. By using an extension
    we can make BSDF trigger on these classes and thus encode the images.
    as actual images.
    """

    def __init__(self, array, meta):
        self.array = array
        self.meta = meta

    def get_array(self):
        # Resolve a lazily described array on first access and cache it.
        if not isinstance(self.array, np.ndarray):
            info = self.array
            raw = info["data"]
            if not isinstance(raw, bytes):  # then it's a lazy bsdf.Blob
                raw = raw.get_bytes()
            arr = np.frombuffer(raw, dtype=info["dtype"])
            arr.shape = info["shape"]
            self.array = arr
        return self.array

    def get_meta(self):
        return self.meta
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
class Image2D(Image):
    # Marker subclass for 2D images; the "image2d" BSDF extension
    # (see get_bsdf_serializer) triggers on this class.
    pass
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
class Image3D(Image):
    # Marker subclass for 3D volumes; the "image3d" BSDF extension
    # (see get_bsdf_serializer) triggers on this class.
    pass
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
class BsdfFormat(Format):
    """The BSDF format enables reading and writing of image data in the
    BSDF serialization format. This format allows storage of images, volumes,
    and series thereof. Data can be of any numeric data type, and can
    optionally be compressed. Each image/volume can have associated
    meta data, which can consist of any data type supported by BSDF.

    By default, image data is lazily loaded; the actual image data is
    not read until it is requested. This allows storing multiple images
    in a single file and still have fast access to individual images.
    Alternatively, a series of images can be read in streaming mode, reading
    images as they are read (e.g. from http).

    BSDF is a simple generic binary format. It is easy to extend and there
    are standard extension definitions for 2D and 3D image data.
    Read more at http://bsdf.io.

    Parameters for reading
    ----------------------
    random_access : bool
        Whether individual images in the file can be read in random order.
        Defaults to True for normal files, and to False when reading from HTTP.
        If False, the file is read in "streaming mode", allowing reading
        files as they are read, but without support for "rewinding".
        Note that setting this to True when reading from HTTP, the whole file
        is read upon opening it (since lazy loading is not possible over HTTP).

    Parameters for saving
    ---------------------
    compression : {0, 1, 2}
        Use ``0`` or "no" for no compression, ``1`` or "zlib" for Zlib
        compression (same as zip files and PNG), and ``2`` or "bz2" for Bz2
        compression (more compact but slower). Default 1 (zlib).
        Note that some BSDF implementations may not support compression
        (e.g. JavaScript).

    """

    def _can_read(self, request):
        """Detect BSDF data by the b"BSDF" magic in the first bytes."""
        if request.mode[1] in (self.modes + "?"):
            if request.firstbytes.startswith(b"BSDF"):
                return True

    def _can_write(self, request):
        """We can write whenever mode and extension match this format."""
        if request.mode[1] in (self.modes + "?"):
            if request.extension in self.extensions:
                return True

    # -- reader

    class Reader(Format.Reader):
        def _open(self, random_access=None):
            # Validate - we need a BSDF file consisting of a list of images
            # The list is typically a stream, but does not have to be.
            assert self.request.firstbytes[:4] == b"BSDF", "Not a BSDF file"
            # self.request.firstbytes[5:6] == major and minor version
            if not (
                self.request.firstbytes[6:15] == b"M\x07image2D"
                or self.request.firstbytes[6:15] == b"M\x07image3D"
                or self.request.firstbytes[6:7] == b"l"
            ):
                pass  # Actually, follow a more duck-type approach ...
            # Set options. If we think that seeking is allowed, we lazily load
            # blobs, and set streaming to False (i.e. the whole file is read,
            # but we skip over binary blobs), so that we subsequently allow
            # random access to the images.
            # If seeking is not allowed (e.g. with a http request), we cannot
            # lazily load blobs, but we can still load streaming from the web.
            options = {}
            if self.request.filename.startswith(("http://", "https://")):
                ra = False if random_access is None else bool(random_access)
                options["lazy_blob"] = False  # Because we cannot seek now
                options["load_streaming"] = not ra  # Load as a stream?
            else:
                ra = True if random_access is None else bool(random_access)
                options["lazy_blob"] = ra  # Don't read data until needed
                options["load_streaming"] = not ra

            file = self.request.get_file()
            bsdf, self._serializer = get_bsdf_serializer(options)
            self._stream = self._serializer.load(file)
            # Another validation: a bare dict with array+meta is a single image
            if (
                isinstance(self._stream, dict)
                and "meta" in self._stream
                and "array" in self._stream
            ):
                self._stream = Image(self._stream["array"], self._stream["meta"])
            if not isinstance(self._stream, (Image, list, bsdf.ListStream)):
                raise RuntimeError(
                    "BSDF file does not seem to have an image container."
                )

        def _close(self):
            pass

        def _get_length(self):
            """Number of images; np.inf for an unclosed (unknown-length) stream."""
            if isinstance(self._stream, Image):
                return 1
            elif isinstance(self._stream, list):
                return len(self._stream)
            elif self._stream.count < 0:
                return np.inf
            return self._stream.count

        def _get_data(self, index):
            """Return (array, meta) for the image at the given index."""
            # Validate
            if index < 0 or index >= self.get_length():
                raise IndexError(
                    "Image index %i not in [0 %i]." % (index, self.get_length())
                )
            # Get Image object
            if isinstance(self._stream, Image):
                image_ob = self._stream  # singleton
            elif isinstance(self._stream, list):
                # Easy when we have random access
                image_ob = self._stream[index]
            else:
                # For streaming, we need to skip over frames
                if index < self._stream.index:
                    raise IndexError(
                        "BSDF file is being read in streaming "
                        "mode, thus does not allow rewinding."
                    )
                while index > self._stream.index:
                    self._stream.next()
                image_ob = self._stream.next()  # Can raise StopIteration
            # Is this an image?
            if (
                isinstance(image_ob, dict)
                and "meta" in image_ob
                and "array" in image_ob
            ):
                image_ob = Image(image_ob["array"], image_ob["meta"])
            if isinstance(image_ob, Image):
                # Return as array (if we have lazy blobs, they are read now)
                return image_ob.get_array(), image_ob.get_meta()
            else:
                r = repr(image_ob)
                r = r if len(r) < 200 else r[:197] + "..."
                raise RuntimeError("BSDF file contains non-image " + r)

        def _get_meta_data(self, index):  # pragma: no cover
            return {}  # This format does not support global meta data

    # -- writer

    class Writer(Format.Writer):
        def _open(self, compression=1):
            options = {"compression": compression}
            bsdf, self._serializer = get_bsdf_serializer(options)
            if self.request.mode[1] in "iv":
                self._stream = None  # Singleton image
                self._written = False
            else:
                # Series (stream) of images
                file = self.request.get_file()
                self._stream = bsdf.ListStream()
                self._serializer.save(file, self._stream)

        def _close(self):
            # We close the stream here, which will mark the number of written
            # elements. If we would not close it, the file would be fine, it's
            # just that upon reading it would not be known how many items are
            # in there.
            if self._stream is not None:
                self._stream.close(False)  # False says "keep this a stream"

        def _append_data(self, im, meta):
            """Write one image/volume (with meta) directly or to the stream."""
            # Determine dimension
            ndim = None
            if self.request.mode[1] in "iI":
                ndim = 2
            elif self.request.mode[1] in "vV":
                ndim = 3
            else:
                ndim = 3  # Make an educated guess
                if im.ndim == 2 or (im.ndim == 3 and im.shape[-1] <= 4):
                    ndim = 2
            # Validate shape
            assert ndim in (2, 3)
            if ndim == 2:
                assert im.ndim == 2 or (im.ndim == 3 and im.shape[-1] <= 4)
            else:
                assert im.ndim == 3 or (im.ndim == 4 and im.shape[-1] <= 4)
            # Wrap data and meta data in our special class that will trigger
            # the BSDF image2D or image3D extension.
            if ndim == 2:
                ob = Image2D(im, meta)
            else:
                ob = Image3D(im, meta)
            # Write directly or to stream
            if self._stream is None:
                assert not self._written, "Cannot write singleton image twice"
                self._written = True
                file = self.request.get_file()
                self._serializer.save(file, ob)
            else:
                self._stream.append(ob)

        def set_meta_data(self, meta):  # pragma: no cover
            raise RuntimeError("The BSDF format only supports per-image meta data.")
|
parrot/lib/python3.10/site-packages/imageio/plugins/dicom.py
ADDED
|
@@ -0,0 +1,333 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# imageio is distributed under the terms of the (new) BSD License.
|
| 3 |
+
|
| 4 |
+
"""Read DICOM files.
|
| 5 |
+
|
| 6 |
+
Backend Library: internal
|
| 7 |
+
|
| 8 |
+
A format for reading DICOM images: a common format used to store
|
| 9 |
+
medical image data, such as X-ray, CT and MRI.
|
| 10 |
+
|
| 11 |
+
This format borrows some code (and ideas) from the pydicom project. However,
|
| 12 |
+
only a predefined subset of tags are extracted from the file. This allows
|
| 13 |
+
for great simplifications allowing us to make a stand-alone reader, and
|
| 14 |
+
also results in a much faster read time.
|
| 15 |
+
|
| 16 |
+
By default, only uncompressed and deflated transfer syntaxes are supported.
|
| 17 |
+
If gdcm or dcmtk is installed, these will be used to automatically convert
|
| 18 |
+
the data. See https://github.com/malaterre/GDCM/releases for installing GDCM.
|
| 19 |
+
|
| 20 |
+
This format provides functionality to group images of the same
|
| 21 |
+
series together, thus extracting volumes (and multiple volumes).
|
| 22 |
+
Using volread will attempt to yield a volume. If multiple volumes
|
| 23 |
+
are present, the first one is given. Using mimread will simply yield
|
| 24 |
+
all images in the given directory (not taking series into account).
|
| 25 |
+
|
| 26 |
+
Parameters
|
| 27 |
+
----------
|
| 28 |
+
progress : {True, False, BaseProgressIndicator}
|
| 29 |
+
Whether to show progress when reading from multiple files.
|
| 30 |
+
Default True. By passing an object that inherits from
|
| 31 |
+
BaseProgressIndicator, the way in which progress is reported
|
| 32 |
+
can be customized.
|
| 33 |
+
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
# todo: Use pydicom:
|
| 37 |
+
# * Note: is not py3k ready yet
|
| 38 |
+
# * Allow reading the full meta info
|
| 39 |
+
# I think we can more or less replace the SimpleDicomReader with a
|
| 40 |
+
# pydicom.Dataset. For series, only need to read the full info from one
|
| 41 |
+
# file: speed still high
|
| 42 |
+
# * Perhaps allow writing?
|
| 43 |
+
|
| 44 |
+
import os
|
| 45 |
+
import sys
|
| 46 |
+
import logging
|
| 47 |
+
import subprocess
|
| 48 |
+
|
| 49 |
+
from ..core import Format, BaseProgressIndicator, StdoutProgressIndicator
|
| 50 |
+
from ..core import read_n_bytes
|
| 51 |
+
|
| 52 |
+
# Handle on the heavy _dicom helper module; lazily loaded in load_lib()
_dicom = None

logger = logging.getLogger(__name__)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def load_lib():
    """Import the ``_dicom`` helper module on first use.

    Caches the module in the module-global ``_dicom`` and returns it.
    """
    global _dicom
    from . import _dicom

    return _dicom
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# Determine endianness of the host system.
sys_is_little_endian = sys.byteorder == "little"
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def get_dcmdjpeg_exe():
    """Locate the dcmtk ``dcmdjpeg`` executable.

    Returns the command as a one-element argv list, or None when the tool
    is found neither in the common Windows install dirs nor on the PATH.
    """
    exe = "dcmdjpeg" + (".exe" if sys.platform.startswith("win") else "")
    # Well-known Windows installation locations first.
    for directory in (
        "c:\\dcmtk",
        "c:\\Program Files",
        "c:\\Program Files\\dcmtk",
        "c:\\Program Files (x86)\\dcmtk",
    ):
        candidate = os.path.join(directory, exe)
        if os.path.isfile(candidate):
            return [candidate]

    # Fall back to whatever is on the PATH.
    try:
        subprocess.check_call([exe, "--version"])
    except Exception:
        return None
    return [exe]
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def get_gdcmconv_exe():
    """Locate the GDCM ``gdcmconv`` executable.

    Returns the command as an argv list (including the ``--raw`` flag), or
    None when the tool is not on the PATH and not in a Windows install dir.
    """
    exe = "gdcmconv" + (".exe" if sys.platform.startswith("win") else "")
    # Maybe it's on the path
    try:
        subprocess.check_call([exe, "--version"])
    except Exception:
        pass
    else:
        return [exe, "--raw"]

    # Collect install directories that look like GDCM (e.g. "GDCM 2.8").
    candidates = []
    for base_dir in (r"c:\Program Files",):
        if os.path.isdir(base_dir):
            for dname in os.listdir(base_dir):
                if dname.lower().startswith("gdcm"):
                    version_suffix = dname[4:].strip()
                    candidates.append((version_suffix, os.path.join(base_dir, dname)))

    # Try higher versions first; accept the exe in the dir root or in bin/.
    for _, dirname in sorted(candidates, reverse=True):
        for candidate in (
            os.path.join(dirname, "gdcmconv.exe"),
            os.path.join(dirname, "bin", "gdcmconv.exe"),
        ):
            if os.path.isfile(candidate):
                return [candidate, "--raw"]
    return None
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
class DicomFormat(Format):
    """See :mod:`imageio.plugins.dicom`"""

    def _can_read(self, request):
        """Detect DICOM by the b"DICM" marker at byte offset 128."""
        # If user URI was a directory, we check whether it has a DICOM file
        if os.path.isdir(request.filename):
            files = os.listdir(request.filename)
            for fname in sorted(files):  # Sorting make it consistent
                filename = os.path.join(request.filename, fname)
                if os.path.isfile(filename) and "DICOMDIR" not in fname:
                    with open(filename, "rb") as f:
                        first_bytes = read_n_bytes(f, 140)
                    # Decide based on the first suitable file found
                    return first_bytes[128:132] == b"DICM"
            else:
                return False
        # Check
        return request.firstbytes[128:132] == b"DICM"

    def _can_write(self, request):
        # We cannot save yet. May be possible if we will used pydicom as
        # a backend.
        return False

    # --

    class Reader(Format.Reader):
        # Class-level set: directories for which the "compressed data"
        # warning was already emitted (shared across Reader instances).
        _compressed_warning_dirs = set()

        def _open(self, progress=True):
            """Open a DICOM file or directory; auto-convert compressed data
            via gdcmconv/dcmdjpeg when one of those tools is available."""
            if not _dicom:
                load_lib()
            if os.path.isdir(self.request.filename):
                # A dir can be given if the user used the format explicitly
                self._info = {}
                self._data = None
            else:
                # Read the given dataset now ...
                try:
                    dcm = _dicom.SimpleDicomReader(self.request.get_file())
                except _dicom.CompressedDicom as err:
                    # We cannot do this on our own. Perhaps with some help ...
                    cmd = get_gdcmconv_exe()
                    if not cmd and "JPEG" in str(err):
                        cmd = get_dcmdjpeg_exe()
                    if not cmd:
                        # No converter available: reword the error to point
                        # the user at installing one, then re-raise.
                        msg = err.args[0].replace("using", "installing")
                        msg = msg.replace("convert", "auto-convert")
                        err.args = (msg,)
                        raise
                    else:
                        fname1 = self.request.get_local_filename()
                        fname2 = fname1 + ".raw"
                        try:
                            subprocess.check_call(cmd + [fname1, fname2])
                        except Exception:
                            raise err
                        d = os.path.dirname(fname1)
                        if d not in self._compressed_warning_dirs:
                            self._compressed_warning_dirs.add(d)
                            logger.warning(
                                "DICOM file contained compressed data. "
                                + "Autoconverting with "
                                + cmd[0]
                                + " (this warning is shown once for each directory)"
                            )
                        dcm = _dicom.SimpleDicomReader(fname2)

                self._info = dcm._info
                self._data = dcm.get_numpy_array()

            # Initialize series, list of DicomSeries objects
            self._series = None  # only created if needed

            # Set progress indicator
            if isinstance(progress, BaseProgressIndicator):
                self._progressIndicator = progress
            elif progress is True:
                p = StdoutProgressIndicator("Reading DICOM")
                self._progressIndicator = p
            elif progress in (None, False):
                self._progressIndicator = BaseProgressIndicator("Dummy")
            else:
                raise ValueError("Invalid value for progress.")

        def _close(self):
            # Clean up
            self._info = None
            self._data = None
            self._series = None

        @property
        def series(self):
            """DicomSeries objects for the containing directory (lazy)."""
            if self._series is None:
                pi = self._progressIndicator
                self._series = _dicom.process_directory(self.request, pi)
            return self._series

        def _get_length(self):
            """Number of ndimages, which depends on the requested mode."""
            if self._data is None:
                # Directory was given: pull data from the first series' file
                dcm = self.series[0][0]
                self._info = dcm._info
                self._data = dcm.get_numpy_array()

            nslices = self._data.shape[0] if (self._data.ndim == 3) else 1

            if self.request.mode[1] == "i":
                # User expects one, but lets be honest about this file
                return nslices
            elif self.request.mode[1] == "I":
                # User expects multiple, if this file has multiple slices, ok.
                # Otherwise we have to check the series.
                if nslices > 1:
                    return nslices
                else:
                    return sum([len(serie) for serie in self.series])
            elif self.request.mode[1] == "v":
                # User expects a volume, if this file has one, ok.
                # Otherwise we have to check the series
                if nslices > 1:
                    return 1
                else:
                    return len(self.series)  # We assume one volume per series
            elif self.request.mode[1] == "V":
                # User expects multiple volumes. We have to check the series
                return len(self.series)  # We assume one volume per series
            else:
                raise RuntimeError("DICOM plugin should know what to expect.")

        def _get_slice_data(self, index):
            """Return (slice, info) from the already-loaded data."""
            nslices = self._data.shape[0] if (self._data.ndim == 3) else 1

            # Allow index >1 only if this file contains >1
            if nslices > 1:
                return self._data[index], self._info
            elif index == 0:
                return self._data, self._info
            else:
                raise IndexError("Dicom file contains only one slice.")

        def _get_data(self, index):
            """Return (array, meta) for the ndimage at *index*, honoring mode."""
            if self._data is None:
                dcm = self.series[0][0]
                self._info = dcm._info
                self._data = dcm.get_numpy_array()

            nslices = self._data.shape[0] if (self._data.ndim == 3) else 1

            if self.request.mode[1] == "i":
                return self._get_slice_data(index)
            elif self.request.mode[1] == "I":
                # Return slice from volume, or return item from series
                if index == 0 and nslices > 1:
                    return self._data[index], self._info
                else:
                    L = []
                    for serie in self.series:
                        L.extend([dcm_ for dcm_ in serie])
                    return L[index].get_numpy_array(), L[index].info
            elif self.request.mode[1] in "vV":
                # Return volume or series
                if index == 0 and nslices > 1:
                    return self._data, self._info
                else:
                    return (
                        self.series[index].get_numpy_array(),
                        self.series[index].info,
                    )
            # mode is `?` (typically because we are using V3). If there is a
            # series (multiple files), index referrs to the element of the
            # series and we read volumes. If there is no series, index
            # referrs to the slice in the volume we read "flat" images.
            elif len(self.series) > 1:
                # mode is `?` and there are multiple series. Each series is a ndimage.
                return (
                    self.series[index].get_numpy_array(),
                    self.series[index].info,
                )
            else:
                # mode is `?` and there is only one series. Each slice is an ndimage.
                return self._get_slice_data(index)

        def _get_meta_data(self, index):
            """Return meta data for *index* (or the "first" file when None)."""
            if self._data is None:
                dcm = self.series[0][0]
                self._info = dcm._info
                self._data = dcm.get_numpy_array()

            nslices = self._data.shape[0] if (self._data.ndim == 3) else 1

            # Default is the meta data of the given file, or the "first" file.
            if index is None:
                return self._info

            if self.request.mode[1] == "i":
                return self._info
            elif self.request.mode[1] == "I":
                # Return slice from volume, or return item from series
                if index == 0 and nslices > 1:
                    return self._info
                else:
                    L = []
                    for serie in self.series:
                        L.extend([dcm_ for dcm_ in serie])
                    return L[index].info
            elif self.request.mode[1] in "vV":
                # Return volume or series
                if index == 0 and nslices > 1:
                    return self._info
                else:
                    return self.series[index].info
            else:  # pragma: no cover
                raise ValueError("DICOM plugin should know what to expect.")
|
parrot/lib/python3.10/site-packages/imageio/plugins/example.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# imageio is distributed under the terms of the (new) BSD License.
|
| 3 |
+
|
| 4 |
+
""" Example plugin. You can use this as a template for your own plugin.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
from .. import formats
|
| 10 |
+
from ..core import Format
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class DummyFormat(Format):
    """The dummy format is an example format that does nothing.
    It will never indicate that it can read or write a file. When
    explicitly asked to read, it will simply read the bytes. When
    explicitly asked to write, it will raise an error.

    This documentation is shown when the user does ``help('thisformat')``.

    Parameters for reading
    ----------------------
    Specify arguments in numpy doc style here.

    Parameters for saving
    ---------------------
    Specify arguments in numpy doc style here.

    """

    def _can_read(self, request):
        # This method is called when the format manager is searching
        # for a format to read a certain image. Return True if this format
        # can do it.
        #
        # The format manager is aware of the extensions and the modes
        # that each format can handle. It will first ask all formats
        # that *seem* to be able to read it whether they can. If none
        # can, it will ask the remaining formats if they can: the
        # extension might be missing, and this allows formats to provide
        # functionality for certain extensions, while giving preference
        # to other plugins.
        #
        # If a format says it can, it should live up to it. The format
        # would ideally check the request.firstbytes and look for a
        # header of some kind.
        #
        # The request object has:
        # request.filename: a representation of the source (only for reporting)
        # request.firstbytes: the first 256 bytes of the file.
        # request.mode[0]: read or write mode

        if request.extension in self.extensions:
            return True

    def _can_write(self, request):
        # This method is called when the format manager is searching
        # for a format to write a certain image. It will first ask all
        # formats that *seem* to be able to write it whether they can.
        # If none can, it will ask the remaining formats if they can.
        #
        # Return True if the format can do it.

        # In most cases, this code does suffice:
        if request.extension in self.extensions:
            return True

    # -- reader

    class Reader(Format.Reader):
        """Example reader: returns the raw file bytes as an Nx1 uint8 array."""

        def _open(self, some_option=False, length=1):
            # Specify kwargs here. Optionally, the user-specified kwargs
            # can also be accessed via the request.kwargs object.
            #
            # The request object provides two ways to get access to the
            # data. Use just one:
            #  - Use request.get_file() for a file object (preferred)
            #  - Use request.get_local_filename() for a file on the system
            self._fp = self.request.get_file()
            self._length = length  # passed as an arg in this case for testing
            self._data = None

        def _close(self):
            # Close the reader.
            # Note that the request object will close self._fp
            pass

        def _get_length(self):
            # Return the number of images. Can be np.inf
            return self._length

        def _get_data(self, index):
            # Return the data and meta data for the given index
            if index >= self._length:
                raise IndexError("Image index %i > %i" % (index, self._length))
            # Read all bytes
            if self._data is None:
                self._data = self._fp.read()
            # Put in a numpy array
            im = np.frombuffer(self._data, "uint8")
            im.shape = len(im), 1
            # Return array and dummy meta data
            return im, {}

        def _get_meta_data(self, index):
            # Get the meta data for the given index. If index is None, it
            # should return the global meta data.
            return {}  # This format does not support meta data

    # -- writer

    class Writer(Format.Writer):
        """Example writer: accepts no data and raises on every write."""

        def _open(self, flags=0):
            # Specify kwargs here. Optionally, the user-specified kwargs
            # can also be accessed via the request.kwargs object.
            #
            # The request object provides two ways to write the data.
            # Use just one:
            #  - Use request.get_file() for a file object (preferred)
            #  - Use request.get_local_filename() for a file on the system
            self._fp = self.request.get_file()

        def _close(self):
            # Close the reader.
            # Note that the request object will close self._fp
            pass

        def _append_data(self, im, meta):
            # Process the given data and meta data.
            raise RuntimeError("The dummy format cannot write image data.")

        def set_meta_data(self, meta):
            # Process the given meta data (global for all images)
            # It is not mandatory to support this.
            raise RuntimeError("The dummy format cannot write meta data.")
| 136 |
+
|
| 137 |
+
|
| 138 |
+
# Register. You register an *instance* of a Format class. Here specify:
|
| 139 |
+
# Register. You register an *instance* of a Format class. Here specify:
# NOTE(review): the module-level name ``format`` shadows the builtin; it is
# kept as-is since it is part of this module's public surface.
format = DummyFormat(
    "dummy",  # short name
    "An example format that does nothing.",  # one line descr.
    ".foobar .nonexistentext",  # list of extensions
    "iI",  # modes, characters in iIvV
)
formats.add_format(format)
|
parrot/lib/python3.10/site-packages/imageio/plugins/feisem.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# imageio is distributed under the terms of the (new) BSD License.
|
| 3 |
+
|
| 4 |
+
"""Read TIFF from FEI SEM microscopes.
|
| 5 |
+
|
| 6 |
+
Backend Library: internal
|
| 7 |
+
|
| 8 |
+
This format is based on :mod:`TIFF <imageio.plugins.tifffile>`, and supports the
|
| 9 |
+
same parameters. FEI microscopes append metadata as ASCII text at the end of the
|
| 10 |
+
file, which this reader correctly extracts.
|
| 11 |
+
|
| 12 |
+
Parameters
|
| 13 |
+
----------
|
| 14 |
+
discard_watermark : bool
|
| 15 |
+
If True (default), discard the bottom rows of the image, which
|
| 16 |
+
contain no image data, only a watermark with metadata.
|
| 17 |
+
watermark_height : int
|
| 18 |
+
The height in pixels of the FEI watermark. The default is 70.
|
| 19 |
+
|
| 20 |
+
See Also
|
| 21 |
+
--------
|
| 22 |
+
:mod:`imageio.plugins.tifffile`
|
| 23 |
+
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
from .tifffile import TiffFormat
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class FEISEMFormat(TiffFormat):
|
| 31 |
+
"""See :mod:`imageio.plugins.feisem`"""
|
| 32 |
+
|
| 33 |
+
def _can_write(self, request):
|
| 34 |
+
return False # FEI-SEM only supports reading
|
| 35 |
+
|
| 36 |
+
class Reader(TiffFormat.Reader):
|
| 37 |
+
def _get_data(self, index=0, discard_watermark=True, watermark_height=70):
|
| 38 |
+
"""Get image and metadata from given index.
|
| 39 |
+
|
| 40 |
+
FEI images usually (always?) contain a watermark at the
|
| 41 |
+
bottom of the image, 70 pixels high. We discard this by
|
| 42 |
+
default as it does not contain any information not present
|
| 43 |
+
in the metadata.
|
| 44 |
+
"""
|
| 45 |
+
im, meta = super(FEISEMFormat.Reader, self)._get_data(index)
|
| 46 |
+
if discard_watermark:
|
| 47 |
+
im = im[:-watermark_height]
|
| 48 |
+
return im, meta
|
| 49 |
+
|
| 50 |
+
def _get_meta_data(self, index=None):
|
| 51 |
+
"""Read the metadata from an FEI SEM TIFF.
|
| 52 |
+
|
| 53 |
+
This metadata is included as ASCII text at the end of the file.
|
| 54 |
+
|
| 55 |
+
The index, if provided, is ignored.
|
| 56 |
+
|
| 57 |
+
Returns
|
| 58 |
+
-------
|
| 59 |
+
metadata : dict
|
| 60 |
+
Dictionary of metadata.
|
| 61 |
+
"""
|
| 62 |
+
if hasattr(self, "_fei_meta"):
|
| 63 |
+
return self._fei_meta
|
| 64 |
+
|
| 65 |
+
md = {"root": {}}
|
| 66 |
+
current_tag = "root"
|
| 67 |
+
reading_metadata = False
|
| 68 |
+
filename = self.request.get_local_filename()
|
| 69 |
+
with open(filename, encoding="utf8", errors="ignore") as fin:
|
| 70 |
+
for line in fin:
|
| 71 |
+
if not reading_metadata:
|
| 72 |
+
if not line.startswith("Date="):
|
| 73 |
+
continue
|
| 74 |
+
else:
|
| 75 |
+
reading_metadata = True
|
| 76 |
+
line = line.rstrip()
|
| 77 |
+
if line.startswith("["):
|
| 78 |
+
current_tag = line.lstrip("[").rstrip("]")
|
| 79 |
+
md[current_tag] = {}
|
| 80 |
+
else:
|
| 81 |
+
if "=" in line: # ignore empty and irrelevant lines
|
| 82 |
+
key, val = line.split("=", maxsplit=1)
|
| 83 |
+
for tag_type in (int, float):
|
| 84 |
+
try:
|
| 85 |
+
val = tag_type(val)
|
| 86 |
+
except ValueError:
|
| 87 |
+
continue
|
| 88 |
+
else:
|
| 89 |
+
break
|
| 90 |
+
md[current_tag][key] = val
|
| 91 |
+
if not md["root"] and len(md) == 1:
|
| 92 |
+
raise ValueError("Input file %s contains no FEI metadata." % filename)
|
| 93 |
+
|
| 94 |
+
self._fei_meta = md
|
| 95 |
+
return md
|
parrot/lib/python3.10/site-packages/imageio/plugins/ffmpeg.py
ADDED
|
@@ -0,0 +1,729 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# imageio is distributed under the terms of the (new) BSD License.
|
| 3 |
+
|
| 4 |
+
"""Read/Write video using FFMPEG
|
| 5 |
+
|
| 6 |
+
.. note::
|
| 7 |
+
We are in the process of (slowly) replacing this plugin with a new one that
|
| 8 |
+
is based on `pyav <https://pyav.org/docs/stable/>`_. It is faster and more
|
| 9 |
+
flexible than the plugin documented here. Check the :mod:`pyav
|
| 10 |
+
plugin's documentation <imageio.plugins.pyav>` for more information about
|
| 11 |
+
this plugin.
|
| 12 |
+
|
| 13 |
+
Backend Library: https://github.com/imageio/imageio-ffmpeg
|
| 14 |
+
|
| 15 |
+
.. note::
|
| 16 |
+
To use this plugin you have to install its backend::
|
| 17 |
+
|
| 18 |
+
pip install imageio[ffmpeg]
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
The ffmpeg format provides reading and writing for a wide range of movie formats
|
| 22 |
+
such as .avi, .mpeg, .mp4, etc. as well as the ability to read streams from
|
| 23 |
+
webcams and USB cameras. It is based on ffmpeg and is inspired by/based `moviepy
|
| 24 |
+
<https://github.com/Zulko/moviepy/>`_ by Zulko.
|
| 25 |
+
|
| 26 |
+
Parameters for reading
|
| 27 |
+
----------------------
|
| 28 |
+
fps : scalar
|
| 29 |
+
The number of frames per second of the input stream. Default None (i.e.
|
| 30 |
+
read at the file's native fps). One can use this for files with a
|
| 31 |
+
variable fps, or in cases where imageio is unable to correctly detect
|
| 32 |
+
the fps. In case of trouble opening camera streams, it may help to set an
|
| 33 |
+
explicit fps value matching a framerate supported by the camera.
|
| 34 |
+
loop : bool
|
| 35 |
+
If True, the video will rewind as soon as a frame is requested
|
| 36 |
+
beyond the last frame. Otherwise, IndexError is raised. Default False.
|
| 37 |
+
Setting this to True will internally call ``count_frames()``,
|
| 38 |
+
and set the reader's length to that value instead of inf.
|
| 39 |
+
size : str | tuple
|
| 40 |
+
The frame size (i.e. resolution) to read the images, e.g.
|
| 41 |
+
(100, 100) or "640x480". For camera streams, this allows setting
|
| 42 |
+
the capture resolution. For normal video data, ffmpeg will
|
| 43 |
+
rescale the data.
|
| 44 |
+
dtype : str | type
|
| 45 |
+
The dtype for the output arrays. Determines the bit-depth that
|
| 46 |
+
is requested from ffmpeg. Supported dtypes: uint8, uint16.
|
| 47 |
+
Default: uint8.
|
| 48 |
+
pixelformat : str
|
| 49 |
+
The pixel format for the camera to use (e.g. "yuyv422" or
|
| 50 |
+
"gray"). The camera needs to support the format in order for
|
| 51 |
+
this to take effect. Note that the images produced by this
|
| 52 |
+
reader are always RGB.
|
| 53 |
+
input_params : list
|
| 54 |
+
List additional arguments to ffmpeg for input file options.
|
| 55 |
+
(Can also be provided as ``ffmpeg_params`` for backwards compatibility)
|
| 56 |
+
Example ffmpeg arguments to use aggressive error handling:
|
| 57 |
+
['-err_detect', 'aggressive']
|
| 58 |
+
output_params : list
|
| 59 |
+
List additional arguments to ffmpeg for output file options (i.e. the
|
| 60 |
+
stream being read by imageio).
|
| 61 |
+
print_info : bool
|
| 62 |
+
Print information about the video file as reported by ffmpeg.
|
| 63 |
+
|
| 64 |
+
Parameters for writing
|
| 65 |
+
----------------------
|
| 66 |
+
fps : scalar
|
| 67 |
+
The number of frames per second. Default 10.
|
| 68 |
+
codec : str
|
| 69 |
+
the video codec to use. Default 'libx264', which represents the
|
| 70 |
+
widely available mpeg4. Except when saving .wmv files, then the
|
| 71 |
+
defaults is 'msmpeg4' which is more commonly supported for windows
|
| 72 |
+
quality : float | None
|
| 73 |
+
Video output quality. Default is 5. Uses variable bit rate. Highest
|
| 74 |
+
quality is 10, lowest is 0. Set to None to prevent variable bitrate
|
| 75 |
+
flags to FFMPEG so you can manually specify them using output_params
|
| 76 |
+
instead. Specifying a fixed bitrate using 'bitrate' disables this
|
| 77 |
+
parameter.
|
| 78 |
+
bitrate : int | None
|
| 79 |
+
Set a constant bitrate for the video encoding. Default is None causing
|
| 80 |
+
'quality' parameter to be used instead. Better quality videos with
|
| 81 |
+
smaller file sizes will result from using the 'quality' variable
|
| 82 |
+
bitrate parameter rather than specifying a fixed bitrate with this
|
| 83 |
+
parameter.
|
| 84 |
+
pixelformat: str
|
| 85 |
+
The output video pixel format. Default is 'yuv420p' which most widely
|
| 86 |
+
supported by video players.
|
| 87 |
+
input_params : list
|
| 88 |
+
List additional arguments to ffmpeg for input file options (i.e. the
|
| 89 |
+
stream that imageio provides).
|
| 90 |
+
output_params : list
|
| 91 |
+
List additional arguments to ffmpeg for output file options.
|
| 92 |
+
(Can also be provided as ``ffmpeg_params`` for backwards compatibility)
|
| 93 |
+
Example ffmpeg arguments to use only intra frames and set aspect ratio:
|
| 94 |
+
['-intra', '-aspect', '16:9']
|
| 95 |
+
ffmpeg_log_level: str
|
| 96 |
+
Sets ffmpeg output log level. Default is "warning".
|
| 97 |
+
Values can be "quiet", "panic", "fatal", "error", "warning", "info"
|
| 98 |
+
"verbose", or "debug". Also prints the FFMPEG command being used by
|
| 99 |
+
imageio if "info", "verbose", or "debug".
|
| 100 |
+
macro_block_size: int
|
| 101 |
+
Size constraint for video. Width and height, must be divisible by this
|
| 102 |
+
number. If not divisible by this number imageio will tell ffmpeg to
|
| 103 |
+
scale the image up to the next closest size
|
| 104 |
+
divisible by this number. Most codecs are compatible with a macroblock
|
| 105 |
+
size of 16 (default), some can go smaller (4, 8). To disable this
|
| 106 |
+
automatic feature set it to None or 1, however be warned many players
|
| 107 |
+
can't decode videos that are odd in size and some codecs will produce
|
| 108 |
+
poor results or fail. See https://en.wikipedia.org/wiki/Macroblock.
|
| 109 |
+
audio_path : str | None
|
| 110 |
+
Audio path of any audio that needs to be written. Defaults to nothing,
|
| 111 |
+
so no audio will be written. Please note, when writing shorter video
|
| 112 |
+
than the original, ffmpeg will not truncate the audio track; it
|
| 113 |
+
will maintain its original length and be longer than the video.
|
| 114 |
+
audio_codec : str | None
|
| 115 |
+
The audio codec to use. Defaults to nothing, but if an audio_path has
|
| 116 |
+
been provided ffmpeg will attempt to set a default codec.
|
| 117 |
+
|
| 118 |
+
Notes
|
| 119 |
+
-----
|
| 120 |
+
If you are using anaconda and ``anaconda/ffmpeg`` you will not be able to
|
| 121 |
+
encode/decode H.264 (likely due to licensing concerns). If you need this
|
| 122 |
+
format on anaconda install ``conda-forge/ffmpeg`` instead.
|
| 123 |
+
|
| 124 |
+
You can use the ``IMAGEIO_FFMPEG_EXE`` environment variable to force using a
|
| 125 |
+
specific ffmpeg executable.
|
| 126 |
+
|
| 127 |
+
To get the number of frames before having read them all, you can use the
|
| 128 |
+
``reader.count_frames()`` method (the reader will then use
|
| 129 |
+
``imageio_ffmpeg.count_frames_and_secs()`` to get the exact number of frames,
|
| 130 |
+
note that this operation can take a few seconds on large files). Alternatively,
|
| 131 |
+
the number of frames can be estimated from the fps and duration in the meta data
|
| 132 |
+
(though these values themselves are not always present/reliable).
|
| 133 |
+
|
| 134 |
+
"""
|
| 135 |
+
|
| 136 |
+
import re
|
| 137 |
+
import sys
|
| 138 |
+
import time
|
| 139 |
+
import logging
|
| 140 |
+
import platform
|
| 141 |
+
import threading
|
| 142 |
+
import subprocess as sp
|
| 143 |
+
import imageio_ffmpeg
|
| 144 |
+
|
| 145 |
+
import numpy as np
|
| 146 |
+
|
| 147 |
+
from ..core import Format, image_as_uint
|
| 148 |
+
|
| 149 |
+
logger = logging.getLogger(__name__)
|
| 150 |
+
|
| 151 |
+
# Get camera format
|
| 152 |
+
if sys.platform.startswith("win"):
|
| 153 |
+
CAM_FORMAT = "dshow" # dshow or vfwcap
|
| 154 |
+
elif sys.platform.startswith("linux"):
|
| 155 |
+
CAM_FORMAT = "video4linux2"
|
| 156 |
+
elif sys.platform.startswith("darwin"):
|
| 157 |
+
CAM_FORMAT = "avfoundation"
|
| 158 |
+
else: # pragma: no cover
|
| 159 |
+
CAM_FORMAT = "unknown-cam-format"
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def download(directory=None, force_download=False): # pragma: no cover
|
| 163 |
+
raise RuntimeError(
|
| 164 |
+
"imageio.ffmpeg.download() has been deprecated. "
|
| 165 |
+
"Use 'pip install imageio-ffmpeg' instead.'"
|
| 166 |
+
)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
# For backwards compatibility - we dont use this ourselves
|
| 170 |
+
def get_exe(): # pragma: no cover
|
| 171 |
+
"""Wrapper for imageio_ffmpeg.get_ffmpeg_exe()"""
|
| 172 |
+
|
| 173 |
+
return imageio_ffmpeg.get_ffmpeg_exe()
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
class FfmpegFormat(Format):
|
| 177 |
+
"""Read/Write ImageResources using FFMPEG.
|
| 178 |
+
|
| 179 |
+
See :mod:`imageio.plugins.ffmpeg`
|
| 180 |
+
"""
|
| 181 |
+
|
| 182 |
+
def _can_read(self, request):
|
| 183 |
+
# Read from video stream?
|
| 184 |
+
# Note that we could write the _video flag here, but a user might
|
| 185 |
+
# select this format explicitly (and this code is not run)
|
| 186 |
+
if re.match(r"<video(\d+)>", request.filename):
|
| 187 |
+
return True
|
| 188 |
+
|
| 189 |
+
# Read from file that we know?
|
| 190 |
+
if request.extension in self.extensions:
|
| 191 |
+
return True
|
| 192 |
+
|
| 193 |
+
def _can_write(self, request):
|
| 194 |
+
if request.extension in self.extensions:
|
| 195 |
+
return True
|
| 196 |
+
|
| 197 |
+
# --
|
| 198 |
+
|
| 199 |
+
class Reader(Format.Reader):
|
| 200 |
+
_frame_catcher = None
|
| 201 |
+
_read_gen = None
|
| 202 |
+
|
| 203 |
+
def _get_cam_inputname(self, index):
|
| 204 |
+
if sys.platform.startswith("linux"):
|
| 205 |
+
return "/dev/" + self.request._video[1:-1]
|
| 206 |
+
|
| 207 |
+
elif sys.platform.startswith("win"):
|
| 208 |
+
# Ask ffmpeg for list of dshow device names
|
| 209 |
+
ffmpeg_api = imageio_ffmpeg
|
| 210 |
+
cmd = [
|
| 211 |
+
ffmpeg_api.get_ffmpeg_exe(),
|
| 212 |
+
"-list_devices",
|
| 213 |
+
"true",
|
| 214 |
+
"-f",
|
| 215 |
+
CAM_FORMAT,
|
| 216 |
+
"-i",
|
| 217 |
+
"dummy",
|
| 218 |
+
]
|
| 219 |
+
# Set `shell=True` in sp.run to prevent popup of a command
|
| 220 |
+
# line window in frozen applications. Note: this would be a
|
| 221 |
+
# security vulnerability if user-input goes into the cmd.
|
| 222 |
+
# Note that the ffmpeg process returns with exit code 1 when
|
| 223 |
+
# using `-list_devices` (or `-list_options`), even if the
|
| 224 |
+
# command is successful, so we set `check=False` explicitly.
|
| 225 |
+
completed_process = sp.run(
|
| 226 |
+
cmd,
|
| 227 |
+
stdout=sp.PIPE,
|
| 228 |
+
stderr=sp.PIPE,
|
| 229 |
+
encoding="utf-8",
|
| 230 |
+
shell=True,
|
| 231 |
+
check=False,
|
| 232 |
+
)
|
| 233 |
+
|
| 234 |
+
# Return device name at index
|
| 235 |
+
try:
|
| 236 |
+
name = parse_device_names(completed_process.stderr)[index]
|
| 237 |
+
except IndexError:
|
| 238 |
+
raise IndexError("No ffdshow camera at index %i." % index)
|
| 239 |
+
return "video=%s" % name
|
| 240 |
+
|
| 241 |
+
elif sys.platform.startswith("darwin"):
|
| 242 |
+
# Appears that newer ffmpeg builds don't support -list-devices
|
| 243 |
+
# on OS X. But you can directly open the camera by index.
|
| 244 |
+
name = str(index)
|
| 245 |
+
return name
|
| 246 |
+
|
| 247 |
+
else: # pragma: no cover
|
| 248 |
+
return "??"
|
| 249 |
+
|
| 250 |
+
def _open(
|
| 251 |
+
self,
|
| 252 |
+
loop=False,
|
| 253 |
+
size=None,
|
| 254 |
+
dtype=None,
|
| 255 |
+
pixelformat=None,
|
| 256 |
+
print_info=False,
|
| 257 |
+
ffmpeg_params=None,
|
| 258 |
+
input_params=None,
|
| 259 |
+
output_params=None,
|
| 260 |
+
fps=None,
|
| 261 |
+
):
|
| 262 |
+
# Get generator functions
|
| 263 |
+
self._ffmpeg_api = imageio_ffmpeg
|
| 264 |
+
# Process input args
|
| 265 |
+
self._arg_loop = bool(loop)
|
| 266 |
+
if size is None:
|
| 267 |
+
self._arg_size = None
|
| 268 |
+
elif isinstance(size, tuple):
|
| 269 |
+
self._arg_size = "%ix%i" % size
|
| 270 |
+
elif isinstance(size, str) and "x" in size:
|
| 271 |
+
self._arg_size = size
|
| 272 |
+
else:
|
| 273 |
+
raise ValueError('FFMPEG size must be tuple of "NxM"')
|
| 274 |
+
if pixelformat is None:
|
| 275 |
+
pass
|
| 276 |
+
elif not isinstance(pixelformat, str):
|
| 277 |
+
raise ValueError("FFMPEG pixelformat must be str")
|
| 278 |
+
if dtype is None:
|
| 279 |
+
self._dtype = np.dtype("uint8")
|
| 280 |
+
else:
|
| 281 |
+
self._dtype = np.dtype(dtype)
|
| 282 |
+
allowed_dtypes = ["uint8", "uint16"]
|
| 283 |
+
if self._dtype.name not in allowed_dtypes:
|
| 284 |
+
raise ValueError(
|
| 285 |
+
"dtype must be one of: {}".format(", ".join(allowed_dtypes))
|
| 286 |
+
)
|
| 287 |
+
self._arg_pixelformat = pixelformat
|
| 288 |
+
self._arg_input_params = input_params or []
|
| 289 |
+
self._arg_output_params = output_params or []
|
| 290 |
+
self._arg_input_params += ffmpeg_params or [] # backward compat
|
| 291 |
+
# Write "_video"_arg - indicating webcam support
|
| 292 |
+
self.request._video = None
|
| 293 |
+
regex_match = re.match(r"<video(\d+)>", self.request.filename)
|
| 294 |
+
if regex_match:
|
| 295 |
+
self.request._video = self.request.filename
|
| 296 |
+
# Get local filename
|
| 297 |
+
if self.request._video:
|
| 298 |
+
index = int(regex_match.group(1))
|
| 299 |
+
self._filename = self._get_cam_inputname(index)
|
| 300 |
+
else:
|
| 301 |
+
self._filename = self.request.get_local_filename()
|
| 302 |
+
# When passed to ffmpeg on command line, carets need to be escaped.
|
| 303 |
+
self._filename = self._filename.replace("^", "^^")
|
| 304 |
+
# Determine pixel format and depth
|
| 305 |
+
self._depth = 3
|
| 306 |
+
if self._dtype.name == "uint8":
|
| 307 |
+
self._pix_fmt = "rgb24"
|
| 308 |
+
self._bytes_per_channel = 1
|
| 309 |
+
else:
|
| 310 |
+
self._pix_fmt = "rgb48le"
|
| 311 |
+
self._bytes_per_channel = 2
|
| 312 |
+
# Initialize parameters
|
| 313 |
+
self._pos = -1
|
| 314 |
+
self._meta = {"plugin": "ffmpeg"}
|
| 315 |
+
self._lastread = None
|
| 316 |
+
|
| 317 |
+
# Calculating this from fps and duration is not accurate,
|
| 318 |
+
# and calculating it exactly with ffmpeg_api.count_frames_and_secs
|
| 319 |
+
# takes too long to do for each video. But we need it for looping.
|
| 320 |
+
self._nframes = float("inf")
|
| 321 |
+
if self._arg_loop and not self.request._video:
|
| 322 |
+
self._nframes = self.count_frames()
|
| 323 |
+
self._meta["nframes"] = self._nframes
|
| 324 |
+
|
| 325 |
+
# Specify input framerate? (only on macOS)
|
| 326 |
+
# Ideally we'd get the supported framerate from the metadata, but we get the
|
| 327 |
+
# metadata when we boot ffmpeg ... maybe we could refactor this so we can
|
| 328 |
+
# get the metadata beforehand, but for now we'll just give it 2 tries on MacOS,
|
| 329 |
+
# one with fps 30 and one with fps 15.
|
| 330 |
+
need_input_fps = need_output_fps = False
|
| 331 |
+
if self.request._video and platform.system().lower() == "darwin":
|
| 332 |
+
if "-framerate" not in str(self._arg_input_params):
|
| 333 |
+
need_input_fps = True
|
| 334 |
+
if not self.request.kwargs.get("fps", None):
|
| 335 |
+
need_output_fps = True
|
| 336 |
+
if need_input_fps:
|
| 337 |
+
self._arg_input_params.extend(["-framerate", str(float(30))])
|
| 338 |
+
if need_output_fps:
|
| 339 |
+
self._arg_output_params.extend(["-r", str(float(30))])
|
| 340 |
+
|
| 341 |
+
# Start ffmpeg subprocess and get meta information
|
| 342 |
+
try:
|
| 343 |
+
self._initialize()
|
| 344 |
+
except IndexError:
|
| 345 |
+
# Specify input framerate again, this time different.
|
| 346 |
+
if need_input_fps:
|
| 347 |
+
self._arg_input_params[-1] = str(float(15))
|
| 348 |
+
self._initialize()
|
| 349 |
+
else:
|
| 350 |
+
raise
|
| 351 |
+
|
| 352 |
+
# For cameras, create thread that keeps reading the images
|
| 353 |
+
if self.request._video:
|
| 354 |
+
self._frame_catcher = FrameCatcher(self._read_gen)
|
| 355 |
+
|
| 356 |
+
# For reference - but disabled, because it is inaccurate
|
| 357 |
+
# if self._meta["nframes"] == float("inf"):
|
| 358 |
+
# if self._meta.get("fps", 0) > 0:
|
| 359 |
+
# if self._meta.get("duration", 0) > 0:
|
| 360 |
+
# n = round(self._meta["duration"] * self._meta["fps"])
|
| 361 |
+
# self._meta["nframes"] = int(n)
|
| 362 |
+
|
| 363 |
+
def _close(self):
|
| 364 |
+
# First close the frame catcher, because we cannot close the gen
|
| 365 |
+
# if the frame catcher thread is using it
|
| 366 |
+
if self._frame_catcher is not None:
|
| 367 |
+
self._frame_catcher.stop_me()
|
| 368 |
+
self._frame_catcher = None
|
| 369 |
+
if self._read_gen is not None:
|
| 370 |
+
self._read_gen.close()
|
| 371 |
+
self._read_gen = None
|
| 372 |
+
|
| 373 |
+
def count_frames(self):
|
| 374 |
+
"""Count the number of frames. Note that this can take a few
|
| 375 |
+
seconds for large files. Also note that it counts the number
|
| 376 |
+
of frames in the original video and does not take a given fps
|
| 377 |
+
into account.
|
| 378 |
+
"""
|
| 379 |
+
# This would have been nice, but this does not work :(
|
| 380 |
+
# oargs = []
|
| 381 |
+
# if self.request.kwargs.get("fps", None):
|
| 382 |
+
# fps = float(self.request.kwargs["fps"])
|
| 383 |
+
# oargs += ["-r", "%.02f" % fps]
|
| 384 |
+
cf = self._ffmpeg_api.count_frames_and_secs
|
| 385 |
+
return cf(self._filename)[0]
|
| 386 |
+
|
| 387 |
+
def _get_length(self):
|
| 388 |
+
return self._nframes # only not inf if loop is True
|
| 389 |
+
|
| 390 |
+
def _get_data(self, index):
|
| 391 |
+
"""Reads a frame at index. Note for coders: getting an
|
| 392 |
+
arbitrary frame in the video with ffmpeg can be painfully
|
| 393 |
+
slow if some decoding has to be done. This function tries
|
| 394 |
+
to avoid fectching arbitrary frames whenever possible, by
|
| 395 |
+
moving between adjacent frames."""
|
| 396 |
+
# Modulo index (for looping)
|
| 397 |
+
if self._arg_loop and self._nframes < float("inf"):
|
| 398 |
+
index %= self._nframes
|
| 399 |
+
|
| 400 |
+
if index == self._pos:
|
| 401 |
+
return self._lastread, dict(new=False)
|
| 402 |
+
elif index < 0:
|
| 403 |
+
raise IndexError("Frame index must be >= 0")
|
| 404 |
+
elif index >= self._nframes:
|
| 405 |
+
raise IndexError("Reached end of video")
|
| 406 |
+
else:
|
| 407 |
+
if (index < self._pos) or (index > self._pos + 100):
|
| 408 |
+
self._initialize(index)
|
| 409 |
+
else:
|
| 410 |
+
self._skip_frames(index - self._pos - 1)
|
| 411 |
+
result, is_new = self._read_frame()
|
| 412 |
+
self._pos = index
|
| 413 |
+
return result, dict(new=is_new)
|
| 414 |
+
|
| 415 |
+
def _get_meta_data(self, index):
|
| 416 |
+
return self._meta
|
| 417 |
+
|
| 418 |
+
def _initialize(self, index=0):
|
| 419 |
+
# Close the current generator, and thereby terminate its subprocess
|
| 420 |
+
if self._read_gen is not None:
|
| 421 |
+
self._read_gen.close()
|
| 422 |
+
|
| 423 |
+
iargs = []
|
| 424 |
+
oargs = []
|
| 425 |
+
|
| 426 |
+
# Create input args
|
| 427 |
+
iargs += self._arg_input_params
|
| 428 |
+
if self.request._video:
|
| 429 |
+
iargs += ["-f", CAM_FORMAT]
|
| 430 |
+
if self._arg_pixelformat:
|
| 431 |
+
iargs += ["-pix_fmt", self._arg_pixelformat]
|
| 432 |
+
if self._arg_size:
|
| 433 |
+
iargs += ["-s", self._arg_size]
|
| 434 |
+
elif index > 0: # re-initialize / seek
|
| 435 |
+
# Note: only works if we initialized earlier, and now have meta
|
| 436 |
+
# Some info here: https://trac.ffmpeg.org/wiki/Seeking
|
| 437 |
+
# There are two ways to seek, one before -i (input_params) and
|
| 438 |
+
# after (output_params). The former is fast, because it uses
|
| 439 |
+
# keyframes, the latter is slow but accurate. According to
|
| 440 |
+
# the article above, the fast method should also be accurate
|
| 441 |
+
# from ffmpeg version 2.1, however in version 4.1 our tests
|
| 442 |
+
# start failing again. Not sure why, but we can solve this
|
| 443 |
+
# by combining slow and fast. Seek the long stretch using
|
| 444 |
+
# the fast method, and seek the last 10s the slow way.
|
| 445 |
+
starttime = index / self._meta["fps"]
|
| 446 |
+
seek_slow = min(10, starttime)
|
| 447 |
+
seek_fast = starttime - seek_slow
|
| 448 |
+
# We used to have this epsilon earlier, when we did not use
|
| 449 |
+
# the slow seek. I don't think we need it anymore.
|
| 450 |
+
# epsilon = -1 / self._meta["fps"] * 0.1
|
| 451 |
+
iargs += ["-ss", "%.06f" % (seek_fast)]
|
| 452 |
+
oargs += ["-ss", "%.06f" % (seek_slow)]
|
| 453 |
+
|
| 454 |
+
# Output args, for writing to pipe
|
| 455 |
+
if self._arg_size:
|
| 456 |
+
oargs += ["-s", self._arg_size]
|
| 457 |
+
if self.request.kwargs.get("fps", None):
|
| 458 |
+
fps = float(self.request.kwargs["fps"])
|
| 459 |
+
oargs += ["-r", "%.02f" % fps]
|
| 460 |
+
oargs += self._arg_output_params
|
| 461 |
+
|
| 462 |
+
# Get pixelformat and bytes per pixel
|
| 463 |
+
pix_fmt = self._pix_fmt
|
| 464 |
+
bpp = self._depth * self._bytes_per_channel
|
| 465 |
+
|
| 466 |
+
# Create generator
|
| 467 |
+
rf = self._ffmpeg_api.read_frames
|
| 468 |
+
self._read_gen = rf(
|
| 469 |
+
self._filename, pix_fmt, bpp, input_params=iargs, output_params=oargs
|
| 470 |
+
)
|
| 471 |
+
|
| 472 |
+
# Read meta data. This start the generator (and ffmpeg subprocess)
|
| 473 |
+
if self.request._video:
|
| 474 |
+
# With cameras, catch error and turn into IndexError
|
| 475 |
+
try:
|
| 476 |
+
meta = self._read_gen.__next__()
|
| 477 |
+
except IOError as err:
|
| 478 |
+
err_text = str(err)
|
| 479 |
+
if "darwin" in sys.platform:
|
| 480 |
+
if "Unknown input format: 'avfoundation'" in err_text:
|
| 481 |
+
err_text += (
|
| 482 |
+
"Try installing FFMPEG using "
|
| 483 |
+
"home brew to get a version with "
|
| 484 |
+
"support for cameras."
|
| 485 |
+
)
|
| 486 |
+
raise IndexError(
|
| 487 |
+
"No (working) camera at {}.\n\n{}".format(
|
| 488 |
+
self.request._video, err_text
|
| 489 |
+
)
|
| 490 |
+
)
|
| 491 |
+
else:
|
| 492 |
+
self._meta.update(meta)
|
| 493 |
+
elif index == 0:
|
| 494 |
+
self._meta.update(self._read_gen.__next__())
|
| 495 |
+
else:
|
| 496 |
+
self._read_gen.__next__() # we already have meta data
|
| 497 |
+
|
| 498 |
+
def _skip_frames(self, n=1):
|
| 499 |
+
"""Reads and throws away n frames"""
|
| 500 |
+
for i in range(n):
|
| 501 |
+
self._read_gen.__next__()
|
| 502 |
+
self._pos += n
|
| 503 |
+
|
| 504 |
+
def _read_frame(self):
|
| 505 |
+
# Read and convert to numpy array
|
| 506 |
+
w, h = self._meta["size"]
|
| 507 |
+
framesize = w * h * self._depth * self._bytes_per_channel
|
| 508 |
+
# t0 = time.time()
|
| 509 |
+
|
| 510 |
+
# Read frame
|
| 511 |
+
if self._frame_catcher: # pragma: no cover - camera thing
|
| 512 |
+
s, is_new = self._frame_catcher.get_frame()
|
| 513 |
+
else:
|
| 514 |
+
s = self._read_gen.__next__()
|
| 515 |
+
is_new = True
|
| 516 |
+
|
| 517 |
+
# Check
|
| 518 |
+
if len(s) != framesize:
|
| 519 |
+
raise RuntimeError(
|
| 520 |
+
"Frame is %i bytes, but expected %i." % (len(s), framesize)
|
| 521 |
+
)
|
| 522 |
+
|
| 523 |
+
result = np.frombuffer(s, dtype=self._dtype).copy()
|
| 524 |
+
result = result.reshape((h, w, self._depth))
|
| 525 |
+
# t1 = time.time()
|
| 526 |
+
# print('etime', t1-t0)
|
| 527 |
+
|
| 528 |
+
# Store and return
|
| 529 |
+
self._lastread = result
|
| 530 |
+
return result, is_new
|
| 531 |
+
|
| 532 |
+
# --
|
| 533 |
+
|
| 534 |
+
class Writer(Format.Writer):
    """Writer backend that streams frames to an ffmpeg subprocess.

    Frames are piped to ffmpeg through the imageio-ffmpeg
    ``write_frames`` generator. The actual encoding options are read
    from ``self.request.kwargs`` inside ``_initialize`` (which runs
    lazily on the first appended frame, once the frame size is known);
    the keyword arguments on ``_open`` only advertise the supported
    options to users.
    """

    # Generator driving the ffmpeg subprocess, or None when not started.
    _write_gen = None

    def _open(
        self,
        fps=10,
        codec="libx264",
        bitrate=None,
        pixelformat="yuv420p",
        ffmpeg_params=None,
        input_params=None,
        output_params=None,
        ffmpeg_log_level="quiet",
        quality=5,
        macro_block_size=16,
        audio_path=None,
        audio_codec=None,
    ):
        # The parameter values above are intentionally unused here; the
        # effective values are fetched from self.request.kwargs in
        # _initialize(), after the first frame fixes the output size.
        self._ffmpeg_api = imageio_ffmpeg
        self._filename = self.request.get_local_filename()
        self._pix_fmt = None
        self._depth = None
        self._size = None

    def _close(self):
        # Closing the generator shuts down the ffmpeg subprocess.
        if self._write_gen is not None:
            self._write_gen.close()
            self._write_gen = None

    def _append_data(self, im, meta):
        """Send one frame to ffmpeg, initializing on the first frame.

        Raises ValueError if the image has an unsupported channel count,
        or if its size/channel count differs from the first frame.
        """
        # Get props of image
        h, w = im.shape[:2]
        size = w, h
        depth = 1 if im.ndim == 2 else im.shape[2]

        # Ensure that image is in uint8
        im = image_as_uint(im, bitdepth=8)
        # To be written efficiently, ie. without creating an immutable
        # buffer, by calling im.tobytes() the array must be contiguous.
        if not im.flags.c_contiguous:
            # checking the flag is a micro optimization.
            # the image will be a numpy subclass. See discussion
            # https://github.com/numpy/numpy/issues/11804
            im = np.ascontiguousarray(im)

        # Set size and initialize if not initialized yet
        if self._size is None:
            # Renamed from "map" to avoid shadowing the builtin.
            pix_fmt_per_depth = {1: "gray", 2: "gray8a", 3: "rgb24", 4: "rgba"}
            self._pix_fmt = pix_fmt_per_depth.get(depth, None)
            if self._pix_fmt is None:
                raise ValueError("Image must have 1, 2, 3 or 4 channels")
            self._size = size
            self._depth = depth
            self._initialize()

        # Check size of image
        if size != self._size:
            raise ValueError("All images in a movie should have same size")
        if depth != self._depth:
            raise ValueError(
                "All images in a movie should have same " "number of channels"
            )

        assert self._write_gen is not None  # Check status

        # Write. Yes, we can send the data in as a numpy array
        self._write_gen.send(im)

    def set_meta_data(self, meta):
        # Meta data cannot be embedded by this backend.
        raise RuntimeError(
            "The ffmpeg format does not support setting " "meta data."
        )

    def _initialize(self):
        """(Re)start the ffmpeg write generator using request kwargs."""
        # Close existing generator
        if self._write_gen is not None:
            self._write_gen.close()

        # Get parameters
        # Use None to let imageio-ffmpeg (or ffmpeg) select good results
        fps = self.request.kwargs.get("fps", 10)
        codec = self.request.kwargs.get("codec", None)
        bitrate = self.request.kwargs.get("bitrate", None)
        quality = self.request.kwargs.get("quality", None)
        input_params = self.request.kwargs.get("input_params") or []
        output_params = self.request.kwargs.get("output_params") or []
        output_params += self.request.kwargs.get("ffmpeg_params") or []
        pixelformat = self.request.kwargs.get("pixelformat", None)
        macro_block_size = self.request.kwargs.get("macro_block_size", 16)
        ffmpeg_log_level = self.request.kwargs.get("ffmpeg_log_level", None)
        audio_path = self.request.kwargs.get("audio_path", None)
        audio_codec = self.request.kwargs.get("audio_codec", None)

        macro_block_size = macro_block_size or 1  # None -> 1

        # Create generator
        self._write_gen = self._ffmpeg_api.write_frames(
            self._filename,
            self._size,
            pix_fmt_in=self._pix_fmt,
            pix_fmt_out=pixelformat,
            fps=fps,
            quality=quality,
            bitrate=bitrate,
            codec=codec,
            macro_block_size=macro_block_size,
            ffmpeg_log_level=ffmpeg_log_level,
            input_params=input_params,
            output_params=output_params,
            audio_path=audio_path,
            audio_codec=audio_codec,
        )

        # Seed the generator (this is where the ffmpeg subprocess starts)
        self._write_gen.send(None)
|
| 649 |
+
|
| 650 |
+
|
| 651 |
+
class FrameCatcher(threading.Thread):
    """Background thread that continuously drains frames from a generator.

    Useful when streaming from a webcam: if user code does not grab
    frames fast enough, the pipe buffer fills up, causing lag, and
    ffmpeg can even stall (observed on Linux). ``get_frame()`` always
    hands back the most recent frame, together with a flag telling
    whether that frame has not been returned before.
    """

    def __init__(self, gen):
        threading.Thread.__init__(self)
        self._gen = gen
        self._frame = None
        self._frame_is_new = False
        self._lock = threading.RLock()
        self.daemon = True  # do not let this thread hold up Python shutdown
        self._should_stop = False
        self.start()

    def stop_me(self):
        # Signal the worker loop to exit, then wait until it actually has.
        self._should_stop = True
        while self.is_alive():
            time.sleep(0.001)

    def get_frame(self):
        # Block until the very first frame has arrived.
        while self._frame is None:  # pragma: no cover - an init thing
            time.sleep(0.001)
        with self._lock:
            fresh = self._frame_is_new
            self._frame_is_new = False  # reset
            return self._frame, fresh

    def run(self):
        # Worker loop: pull frames as fast as possible, keep only the latest.
        try:
            while not self._should_stop:
                time.sleep(0)  # give control to other threads
                latest = self._gen.__next__()
                with self._lock:
                    self._frame = latest
                    self._frame_is_new = True
        except (StopIteration, EOFError):
            # Generator exhausted or pipe closed: let the thread end quietly.
            pass
|
| 693 |
+
|
| 694 |
+
|
| 695 |
+
def parse_device_names(ffmpeg_output):
    """Parse the output of the ffmpeg -list-devices command"""
    # Collect [friendly_name, alt_name] pairs from the dshow listing.
    pairs = []
    in_video_section = False
    for raw_line in ffmpeg_output.splitlines():
        if not raw_line.startswith("[dshow"):
            continue
        logger.debug(raw_line)
        text = raw_line.split("]", 1)[1].strip()
        if in_video_section and text.startswith('"'):
            # A quoted line is a device's friendly name.
            pairs.append([text[1:-1], ""])
        elif in_video_section and text.lower().startswith("alternative name"):
            alt = text.split(" name ", 1)[1].strip()[1:-1]
            if sys.platform.startswith("win"):
                alt = alt.replace("&", "^&")  # Tested to work
            else:
                alt = alt.replace("&", "\\&")  # Does this work?
            pairs[-1][-1] = alt
        elif "video devices" in text:
            in_video_section = True
        elif "devices" in text:
            # set False for subsequent "devices" sections
            in_video_section = False
    # Post-process, see #441
    # prefer friendly names, use alt name if two cams have same friendly name
    names = []
    for friendly, alt in pairs:
        if friendly not in names:
            names.append(friendly)
        elif alt:
            names.append(alt)
        else:
            names.append(friendly)  # duplicate, but not much we can do
    return names
|