diff --git a/.gitattributes b/.gitattributes index 636d8530afcb72d184f17dba7383f59838e678d6..2fb807263be3b71460cee24c8aa8102293d502bf 100644 --- a/.gitattributes +++ b/.gitattributes @@ -156,3 +156,6 @@ parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/internal_api. parrot/lib/libgomp.so.1 filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/_async_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text parrot/lib/libquadmath.so.0.0.0 filter=lfs diff=lfs merge=lfs -text +parrot/lib/libquadmath.so.0 filter=lfs diff=lfs merge=lfs -text +parrot/lib/libitm.so.1 filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/parrot/lib/libitm.so.1 b/parrot/lib/libitm.so.1 new file mode 100644 index 0000000000000000000000000000000000000000..f5eb8be8f056fa3817339919908a70a570eab6fa --- /dev/null +++ b/parrot/lib/libitm.so.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70a7a1a8352b39da726e026874f1854096cdd1c60e80ea5cf97a4e38055ea7c1 +size 1018904 diff --git a/parrot/lib/libquadmath.so.0 b/parrot/lib/libquadmath.so.0 new file mode 100644 index 0000000000000000000000000000000000000000..cd082c15339041642f25b85e32bd65b9e0393619 --- /dev/null +++ b/parrot/lib/libquadmath.so.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10c6fadba4c2f6d77e836a50aadbd92e95b137a85eb01b1ca183b50d8f39a2c6 +size 1009408 diff --git a/parrot/lib/python3.10/site-packages/git/__pycache__/config.cpython-310.pyc b/parrot/lib/python3.10/site-packages/git/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54ea4ca6b678603068bc8e5e505e7ca85d46968e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/git/__pycache__/config.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/git/__pycache__/diff.cpython-310.pyc b/parrot/lib/python3.10/site-packages/git/__pycache__/diff.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a38ef0fb0bb1d69b70e8bcd80f04ac27d7fcd46 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/git/__pycache__/diff.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/git/__pycache__/util.cpython-310.pyc b/parrot/lib/python3.10/site-packages/git/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ccef0e5a3a31a061fb02a8476e98a03da36f5990 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/git/__pycache__/util.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/git/index/__init__.py b/parrot/lib/python3.10/site-packages/git/index/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ba48110fddcdf549b6e61f115d28b137a57cc219 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/git/index/__init__.py @@ -0,0 +1,16 @@ +# This module is part of GitPython and is released under the +# 3-Clause BSD License: https://opensource.org/license/bsd-3-clause/ + +"""Initialize the index package.""" + +__all__ = [ + "BaseIndexEntry", + "BlobFilter", + "CheckoutError", + "IndexEntry", + "IndexFile", + "StageType", +] + +from .base import CheckoutError, IndexFile +from .typ import BaseIndexEntry, BlobFilter, IndexEntry, StageType diff --git a/parrot/lib/python3.10/site-packages/git/index/__pycache__/typ.cpython-310.pyc b/parrot/lib/python3.10/site-packages/git/index/__pycache__/typ.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae2c76983cb5da4f7e0ab6c278f1df12ce4b4ab9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/git/index/__pycache__/typ.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/git/index/__pycache__/util.cpython-310.pyc 
b/parrot/lib/python3.10/site-packages/git/index/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..adaf29a55b2692fb09f8948c8be3fc4c887618aa Binary files /dev/null and b/parrot/lib/python3.10/site-packages/git/index/__pycache__/util.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/git/index/base.py b/parrot/lib/python3.10/site-packages/git/index/base.py new file mode 100644 index 0000000000000000000000000000000000000000..b8161ea52810ed8ba58c2c5052ac86f3517ecd18 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/git/index/base.py @@ -0,0 +1,1518 @@ +# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors +# +# This module is part of GitPython and is released under the +# 3-Clause BSD License: https://opensource.org/license/bsd-3-clause/ + +"""Module containing :class:`IndexFile`, an Index implementation facilitating all kinds +of index manipulations such as querying and merging.""" + +__all__ = ["IndexFile", "CheckoutError", "StageType"] + +import contextlib +import datetime +import glob +from io import BytesIO +import os +import os.path as osp +from stat import S_ISLNK +import subprocess +import sys +import tempfile + +from gitdb.base import IStream +from gitdb.db import MemoryDB + +from git.compat import defenc, force_bytes +import git.diff as git_diff +from git.exc import CheckoutError, GitCommandError, GitError, InvalidGitRepositoryError +from git.objects import Blob, Commit, Object, Submodule, Tree +from git.objects.util import Serializable +from git.util import ( + LazyMixin, + LockedFD, + join_path_native, + file_contents_ro, + to_native_path_linux, + unbare_repo, + to_bin_sha, +) + +from .fun import ( + S_IFGITLINK, + aggressive_tree_merge, + entry_key, + read_cache, + run_commit_hook, + stat_mode_to_index_mode, + write_cache, + write_tree_from_cache, +) +from .typ import BaseIndexEntry, IndexEntry, StageType +from .util import TemporaryFileSwap, 
post_clear_cache, default_index, git_working_dir + +# typing ----------------------------------------------------------------------------- + +from typing import ( + Any, + BinaryIO, + Callable, + Dict, + Generator, + IO, + Iterable, + Iterator, + List, + NoReturn, + Sequence, + TYPE_CHECKING, + Tuple, + Union, +) + +from git.types import Literal, PathLike + +if TYPE_CHECKING: + from subprocess import Popen + + from git.refs.reference import Reference + from git.repo import Repo + from git.util import Actor + + +Treeish = Union[Tree, Commit, str, bytes] + +# ------------------------------------------------------------------------------------ + + +@contextlib.contextmanager +def _named_temporary_file_for_subprocess(directory: PathLike) -> Generator[str, None, None]: + """Create a named temporary file git subprocesses can open, deleting it afterward. + + :param directory: + The directory in which the file is created. + + :return: + A context manager object that creates the file and provides its name on entry, + and deletes it on exit. + """ + if sys.platform == "win32": + fd, name = tempfile.mkstemp(dir=directory) + os.close(fd) + try: + yield name + finally: + os.remove(name) + else: + with tempfile.NamedTemporaryFile(dir=directory) as ctx: + yield ctx.name + + +class IndexFile(LazyMixin, git_diff.Diffable, Serializable): + """An Index that can be manipulated using a native implementation in order to save + git command function calls wherever possible. + + This provides custom merging facilities allowing to merge without actually changing + your index or your working tree. This way you can perform your own test merges based + on the index only without having to deal with the working copy. This is useful in + case of partial working trees. + + Entries: + + The index contains an entries dict whose keys are tuples of type + :class:`~git.index.typ.IndexEntry` to facilitate access. 
+ + You may read the entries dict or manipulate it using IndexEntry instance, i.e.:: + + index.entries[index.entry_key(index_entry_instance)] = index_entry_instance + + Make sure you use :meth:`index.write() ` once you are done manipulating the + index directly before operating on it using the git command. + """ + + __slots__ = ("repo", "version", "entries", "_extension_data", "_file_path") + + _VERSION = 2 + """The latest version we support.""" + + S_IFGITLINK = S_IFGITLINK + """Flags for a submodule.""" + + def __init__(self, repo: "Repo", file_path: Union[PathLike, None] = None) -> None: + """Initialize this Index instance, optionally from the given `file_path`. + + If no `file_path` is given, we will be created from the current index file. + + If a stream is not given, the stream will be initialized from the current + repository's index on demand. + """ + self.repo = repo + self.version = self._VERSION + self._extension_data = b"" + self._file_path: PathLike = file_path or self._index_path() + + def _set_cache_(self, attr: str) -> None: + if attr == "entries": + try: + fd = os.open(self._file_path, os.O_RDONLY) + except OSError: + # In new repositories, there may be no index, which means we are empty. 
+ self.entries: Dict[Tuple[PathLike, StageType], IndexEntry] = {} + return + # END exception handling + + try: + stream = file_contents_ro(fd, stream=True, allow_mmap=True) + finally: + os.close(fd) + + self._deserialize(stream) + else: + super()._set_cache_(attr) + + def _index_path(self) -> PathLike: + if self.repo.git_dir: + return join_path_native(self.repo.git_dir, "index") + else: + raise GitCommandError("No git directory given to join index path") + + @property + def path(self) -> PathLike: + """:return: Path to the index file we are representing""" + return self._file_path + + def _delete_entries_cache(self) -> None: + """Safely clear the entries cache so it can be recreated.""" + try: + del self.entries + except AttributeError: + # It failed in Python 2.6.5 with AttributeError. + # FIXME: Look into whether we can just remove this except clause now. + pass + # END exception handling + + # { Serializable Interface + + def _deserialize(self, stream: IO) -> "IndexFile": + """Initialize this instance with index values read from the given stream.""" + self.version, self.entries, self._extension_data, _conten_sha = read_cache(stream) + return self + + def _entries_sorted(self) -> List[IndexEntry]: + """:return: List of entries, in a sorted fashion, first by path, then by stage""" + return sorted(self.entries.values(), key=lambda e: (e.path, e.stage)) + + def _serialize(self, stream: IO, ignore_extension_data: bool = False) -> "IndexFile": + entries = self._entries_sorted() + extension_data = self._extension_data # type: Union[None, bytes] + if ignore_extension_data: + extension_data = None + write_cache(entries, stream, extension_data) + return self + + # } END serializable interface + + def write( + self, + file_path: Union[None, PathLike] = None, + ignore_extension_data: bool = False, + ) -> None: + """Write the current state to our file path or to the given one. 
+ + :param file_path: + If ``None``, we will write to our stored file path from which we have been + initialized. Otherwise we write to the given file path. Please note that + this will change the `file_path` of this index to the one you gave. + + :param ignore_extension_data: + If ``True``, the TREE type extension data read in the index will not be + written to disk. NOTE that no extension data is actually written. Use this + if you have altered the index and would like to use + :manpage:`git-write-tree(1)` afterwards to create a tree representing your + written changes. If this data is present in the written index, + :manpage:`git-write-tree(1)` will instead write the stored/cached tree. + Alternatively, use :meth:`write_tree` to handle this case automatically. + """ + # Make sure we have our entries read before getting a write lock. + # Otherwise it would be done when streaming. + # This can happen if one doesn't change the index, but writes it right away. + self.entries # noqa: B018 + lfd = LockedFD(file_path or self._file_path) + stream = lfd.open(write=True, stream=True) + + try: + self._serialize(stream, ignore_extension_data) + except BaseException: + lfd.rollback() + raise + + lfd.commit() + + # Make sure we represent what we have written. + if file_path is not None: + self._file_path = file_path + + @post_clear_cache + @default_index + def merge_tree(self, rhs: Treeish, base: Union[None, Treeish] = None) -> "IndexFile": + """Merge the given `rhs` treeish into the current index, possibly taking + a common base treeish into account. + + As opposed to the :func:`from_tree` method, this allows you to use an already + existing tree as the left side of the merge. + + :param rhs: + Treeish reference pointing to the 'other' side of the merge. + + :param base: + Optional treeish reference pointing to the common base of `rhs` and this + index which equals lhs. 
+ + :return: + self (containing the merge and possibly unmerged entries in case of + conflicts) + + :raise git.exc.GitCommandError: + If there is a merge conflict. The error will be raised at the first + conflicting path. If you want to have proper merge resolution to be done by + yourself, you have to commit the changed index (or make a valid tree from + it) and retry with a three-way :meth:`index.from_tree ` call. + """ + # -i : ignore working tree status + # --aggressive : handle more merge cases + # -m : do an actual merge + args: List[Union[Treeish, str]] = ["--aggressive", "-i", "-m"] + if base is not None: + args.append(base) + args.append(rhs) + + self.repo.git.read_tree(args) + return self + + @classmethod + def new(cls, repo: "Repo", *tree_sha: Union[str, Tree]) -> "IndexFile": + """Merge the given treeish revisions into a new index which is returned. + + This method behaves like ``git-read-tree --aggressive`` when doing the merge. + + :param repo: + The repository treeish are located in. + + :param tree_sha: + 20 byte or 40 byte tree sha or tree objects. + + :return: + New :class:`IndexFile` instance. Its path will be undefined. + If you intend to write such a merged Index, supply an alternate + ``file_path`` to its :meth:`write` method. + """ + tree_sha_bytes: List[bytes] = [to_bin_sha(str(t)) for t in tree_sha] + base_entries = aggressive_tree_merge(repo.odb, tree_sha_bytes) + + inst = cls(repo) + # Convert to entries dict. + entries: Dict[Tuple[PathLike, int], IndexEntry] = dict( + zip( + ((e.path, e.stage) for e in base_entries), + (IndexEntry.from_base(e) for e in base_entries), + ) + ) + + inst.entries = entries + return inst + + @classmethod + def from_tree(cls, repo: "Repo", *treeish: Treeish, **kwargs: Any) -> "IndexFile": + R"""Merge the given treeish revisions into a new index which is returned. + The original index will remain unaltered. + + :param repo: + The repository treeish are located in. 
+ + :param treeish: + One, two or three :class:`~git.objects.tree.Tree` objects, + :class:`~git.objects.commit.Commit`\s or 40 byte hexshas. + + The result changes according to the amount of trees: + + 1. If 1 Tree is given, it will just be read into a new index. + 2. If 2 Trees are given, they will be merged into a new index using a two + way merge algorithm. Tree 1 is the 'current' tree, tree 2 is the 'other' + one. It behaves like a fast-forward. + 3. If 3 Trees are given, a 3-way merge will be performed with the first tree + being the common ancestor of tree 2 and tree 3. Tree 2 is the 'current' + tree, tree 3 is the 'other' one. + + :param kwargs: + Additional arguments passed to :manpage:`git-read-tree(1)`. + + :return: + New :class:`IndexFile` instance. It will point to a temporary index location + which does not exist anymore. If you intend to write such a merged Index, + supply an alternate ``file_path`` to its :meth:`write` method. + + :note: + In the three-way merge case, ``--aggressive`` will be specified to + automatically resolve more cases in a commonly correct manner. Specify + ``trivial=True`` as a keyword argument to override that. + + As the underlying :manpage:`git-read-tree(1)` command takes into account the + current index, it will be temporarily moved out of the way to prevent any + unexpected interference. + """ + if len(treeish) == 0 or len(treeish) > 3: + raise ValueError("Please specify between 1 and 3 treeish, got %i" % len(treeish)) + + arg_list: List[Union[Treeish, str]] = [] + # Ignore that the working tree and index possibly are out of date. + if len(treeish) > 1: + # Drop unmerged entries when reading our index and merging. + arg_list.append("--reset") + # Handle non-trivial cases the way a real merge does. + arg_list.append("--aggressive") + # END merge handling + + # Create the temporary file in the .git directory to be sure renaming + # works - /tmp/ directories could be on another device. 
+ with _named_temporary_file_for_subprocess(repo.git_dir) as tmp_index: + arg_list.append("--index-output=%s" % tmp_index) + arg_list.extend(treeish) + + # Move the current index out of the way - otherwise the merge may fail as it + # considers existing entries. Moving it essentially clears the index. + # Unfortunately there is no 'soft' way to do it. + # The TemporaryFileSwap ensures the original file gets put back. + with TemporaryFileSwap(join_path_native(repo.git_dir, "index")): + repo.git.read_tree(*arg_list, **kwargs) + index = cls(repo, tmp_index) + index.entries # noqa: B018 # Force it to read the file as we will delete the temp-file. + return index + # END index merge handling + + # UTILITIES + + @unbare_repo + def _iter_expand_paths(self: "IndexFile", paths: Sequence[PathLike]) -> Iterator[PathLike]: + """Expand the directories in list of paths to the corresponding paths + accordingly. + + :note: + git will add items multiple times even if a glob overlapped with manually + specified paths or if paths where specified multiple times - we respect that + and do not prune. + """ + + def raise_exc(e: Exception) -> NoReturn: + raise e + + r = str(self.repo.working_tree_dir) + rs = r + os.sep + for path in paths: + abs_path = str(path) + if not osp.isabs(abs_path): + abs_path = osp.join(r, path) + # END make absolute path + + try: + st = os.lstat(abs_path) # Handles non-symlinks as well. + except OSError: + # The lstat call may fail as the path may contain globs as well. + pass + else: + if S_ISLNK(st.st_mode): + yield abs_path.replace(rs, "") + continue + # END check symlink + + # If the path is not already pointing to an existing file, resolve globs if possible. + if not os.path.exists(abs_path) and ("?" in abs_path or "*" in abs_path or "[" in abs_path): + resolved_paths = glob.glob(abs_path) + # not abs_path in resolved_paths: + # A glob() resolving to the same path we are feeding it with is a + # glob() that failed to resolve. 
If we continued calling ourselves + # we'd endlessly recurse. If the condition below evaluates to true + # then we are likely dealing with a file whose name contains wildcard + # characters. + if abs_path not in resolved_paths: + for f in self._iter_expand_paths(glob.glob(abs_path)): + yield str(f).replace(rs, "") + continue + # END glob handling + try: + for root, _dirs, files in os.walk(abs_path, onerror=raise_exc): + for rela_file in files: + # Add relative paths only. + yield osp.join(root.replace(rs, ""), rela_file) + # END for each file in subdir + # END for each subdirectory + except OSError: + # It was a file or something that could not be iterated. + yield abs_path.replace(rs, "") + # END path exception handling + # END for each path + + def _write_path_to_stdin( + self, + proc: "Popen", + filepath: PathLike, + item: PathLike, + fmakeexc: Callable[..., GitError], + fprogress: Callable[[PathLike, bool, PathLike], None], + read_from_stdout: bool = True, + ) -> Union[None, str]: + """Write path to ``proc.stdin`` and make sure it processes the item, including + progress. + + :return: + stdout string + + :param read_from_stdout: + If ``True``, ``proc.stdout`` will be read after the item was sent to stdin. + In that case, it will return ``None``. + + :note: + There is a bug in :manpage:`git-update-index(1)` that prevents it from + sending reports just in time. This is why we have a version that tries to + read stdout and one which doesn't. In fact, the stdout is not important as + the piped-in files are processed anyway and just in time. + + :note: + Newlines are essential here, git's behaviour is somewhat inconsistent on + this depending on the version, hence we try our best to deal with newlines + carefully. Usually the last newline will not be sent, instead we will close + stdin to break the pipe. 
+ """ + fprogress(filepath, False, item) + rval: Union[None, str] = None + + if proc.stdin is not None: + try: + proc.stdin.write(("%s\n" % filepath).encode(defenc)) + except IOError as e: + # Pipe broke, usually because some error happened. + raise fmakeexc() from e + # END write exception handling + proc.stdin.flush() + + if read_from_stdout and proc.stdout is not None: + rval = proc.stdout.readline().strip() + fprogress(filepath, True, item) + return rval + + def iter_blobs( + self, predicate: Callable[[Tuple[StageType, Blob]], bool] = lambda t: True + ) -> Iterator[Tuple[StageType, Blob]]: + """ + :return: + Iterator yielding tuples of :class:`~git.objects.blob.Blob` objects and + stages, tuple(stage, Blob). + + :param predicate: + Function(t) returning ``True`` if tuple(stage, Blob) should be yielded by + the iterator. A default filter, the `~git.index.typ.BlobFilter`, allows you + to yield blobs only if they match a given list of paths. + """ + for entry in self.entries.values(): + blob = entry.to_blob(self.repo) + blob.size = entry.size + output = (entry.stage, blob) + if predicate(output): + yield output + # END for each entry + + def unmerged_blobs(self) -> Dict[PathLike, List[Tuple[StageType, Blob]]]: + """ + :return: + Dict(path : list(tuple(stage, Blob, ...))), being a dictionary associating a + path in the index with a list containing sorted stage/blob pairs. + + :note: + Blobs that have been removed in one side simply do not exist in the given + stage. That is, a file removed on the 'other' branch whose entries are at + stage 3 will not have a stage 3 entry. 
+ """ + is_unmerged_blob = lambda t: t[0] != 0 + path_map: Dict[PathLike, List[Tuple[StageType, Blob]]] = {} + for stage, blob in self.iter_blobs(is_unmerged_blob): + path_map.setdefault(blob.path, []).append((stage, blob)) + # END for each unmerged blob + for line in path_map.values(): + line.sort() + + return path_map + + @classmethod + def entry_key(cls, *entry: Union[BaseIndexEntry, PathLike, StageType]) -> Tuple[PathLike, StageType]: + return entry_key(*entry) + + def resolve_blobs(self, iter_blobs: Iterator[Blob]) -> "IndexFile": + """Resolve the blobs given in blob iterator. + + This will effectively remove the index entries of the respective path at all + non-null stages and add the given blob as new stage null blob. + + For each path there may only be one blob, otherwise a :exc:`ValueError` will be + raised claiming the path is already at stage 0. + + :raise ValueError: + If one of the blobs already existed at stage 0. + + :return: + self + + :note: + You will have to write the index manually once you are done, i.e. + ``index.resolve_blobs(blobs).write()``. + """ + for blob in iter_blobs: + stage_null_key = (blob.path, 0) + if stage_null_key in self.entries: + raise ValueError("Path %r already exists at stage 0" % str(blob.path)) + # END assert blob is not stage 0 already + + # Delete all possible stages. + for stage in (1, 2, 3): + try: + del self.entries[(blob.path, stage)] + except KeyError: + pass + # END ignore key errors + # END for each possible stage + + self.entries[stage_null_key] = IndexEntry.from_blob(blob) + # END for each blob + + return self + + def update(self) -> "IndexFile": + """Reread the contents of our index file, discarding all cached information + we might have. + + :note: + This is a possibly dangerous operations as it will discard your changes to + :attr:`index.entries `. + + :return: + self + """ + self._delete_entries_cache() + # Allows to lazily reread on demand. 
+ return self + + def write_tree(self) -> Tree: + """Write this index to a corresponding :class:`~git.objects.tree.Tree` object + into the repository's object database and return it. + + :return: + :class:`~git.objects.tree.Tree` object representing this index. + + :note: + The tree will be written even if one or more objects the tree refers to does + not yet exist in the object database. This could happen if you added entries + to the index directly. + + :raise ValueError: + If there are no entries in the cache. + + :raise git.exc.UnmergedEntriesError: + """ + # We obtain no lock as we just flush our contents to disk as tree. + # If we are a new index, the entries access will load our data accordingly. + mdb = MemoryDB() + entries = self._entries_sorted() + binsha, tree_items = write_tree_from_cache(entries, mdb, slice(0, len(entries))) + + # Copy changed trees only. + mdb.stream_copy(mdb.sha_iter(), self.repo.odb) + + # Note: Additional deserialization could be saved if write_tree_from_cache would + # return sorted tree entries. + root_tree = Tree(self.repo, binsha, path="") + root_tree._cache = tree_items + return root_tree + + def _process_diff_args( + self, + args: List[Union[PathLike, "git_diff.Diffable"]], + ) -> List[Union[PathLike, "git_diff.Diffable"]]: + try: + args.pop(args.index(self)) + except IndexError: + pass + # END remove self + return args + + def _to_relative_path(self, path: PathLike) -> PathLike: + """ + :return: + Version of path relative to our git directory or raise :exc:`ValueError` if + it is not within our git directory. 
+ + :raise ValueError: + """ + if not osp.isabs(path): + return path + if self.repo.bare: + raise InvalidGitRepositoryError("require non-bare repository") + if not str(path).startswith(str(self.repo.working_tree_dir)): + raise ValueError("Absolute path %r is not in git repository at %r" % (path, self.repo.working_tree_dir)) + return os.path.relpath(path, self.repo.working_tree_dir) + + def _preprocess_add_items( + self, items: Sequence[Union[PathLike, Blob, BaseIndexEntry, "Submodule"]] + ) -> Tuple[List[PathLike], List[BaseIndexEntry]]: + """Split the items into two lists of path strings and BaseEntries.""" + paths = [] + entries = [] + # if it is a string put in list + if isinstance(items, (str, os.PathLike)): + items = [items] + + for item in items: + if isinstance(item, (str, os.PathLike)): + paths.append(self._to_relative_path(item)) + elif isinstance(item, (Blob, Submodule)): + entries.append(BaseIndexEntry.from_blob(item)) + elif isinstance(item, BaseIndexEntry): + entries.append(item) + else: + raise TypeError("Invalid Type: %r" % item) + # END for each item + return paths, entries + + def _store_path(self, filepath: PathLike, fprogress: Callable) -> BaseIndexEntry: + """Store file at filepath in the database and return the base index entry. + + :note: + This needs the :func:`~git.index.util.git_working_dir` decorator active! + This must be ensured in the calling code. + """ + st = os.lstat(filepath) # Handles non-symlinks as well. + if S_ISLNK(st.st_mode): + # In PY3, readlink is a string, but we need bytes. + # In PY2, it was just OS encoded bytes, we assumed UTF-8. 
+ open_stream: Callable[[], BinaryIO] = lambda: BytesIO(force_bytes(os.readlink(filepath), encoding=defenc)) + else: + open_stream = lambda: open(filepath, "rb") + with open_stream() as stream: + fprogress(filepath, False, filepath) + istream = self.repo.odb.store(IStream(Blob.type, st.st_size, stream)) + fprogress(filepath, True, filepath) + return BaseIndexEntry( + ( + stat_mode_to_index_mode(st.st_mode), + istream.binsha, + 0, + to_native_path_linux(filepath), + ) + ) + + @unbare_repo + @git_working_dir + def _entries_for_paths( + self, + paths: List[str], + path_rewriter: Union[Callable, None], + fprogress: Callable, + entries: List[BaseIndexEntry], + ) -> List[BaseIndexEntry]: + entries_added: List[BaseIndexEntry] = [] + if path_rewriter: + for path in paths: + if osp.isabs(path): + abspath = path + gitrelative_path = path[len(str(self.repo.working_tree_dir)) + 1 :] + else: + gitrelative_path = path + if self.repo.working_tree_dir: + abspath = osp.join(self.repo.working_tree_dir, gitrelative_path) + # END obtain relative and absolute paths + + blob = Blob( + self.repo, + Blob.NULL_BIN_SHA, + stat_mode_to_index_mode(os.stat(abspath).st_mode), + to_native_path_linux(gitrelative_path), + ) + # TODO: variable undefined + entries.append(BaseIndexEntry.from_blob(blob)) + # END for each path + del paths[:] + # END rewrite paths + + # HANDLE PATHS + assert len(entries_added) == 0 + for filepath in self._iter_expand_paths(paths): + entries_added.append(self._store_path(filepath, fprogress)) + # END for each filepath + # END path handling + return entries_added + + def add( + self, + items: Sequence[Union[PathLike, Blob, BaseIndexEntry, "Submodule"]], + force: bool = True, + fprogress: Callable = lambda *args: None, + path_rewriter: Union[Callable[..., PathLike], None] = None, + write: bool = True, + write_extension_data: bool = False, + ) -> List[BaseIndexEntry]: + R"""Add files from the working tree, specific blobs, or + :class:`~git.index.typ.BaseIndexEntry`\s to the 
index. + + :param items: + Multiple types of items are supported, types can be mixed within one call. + Different types imply a different handling. File paths may generally be + relative or absolute. + + - path string + + Strings denote a relative or absolute path into the repository pointing + to an existing file, e.g., ``CHANGES``, `lib/myfile.ext``, + ``/home/gitrepo/lib/myfile.ext``. + + Absolute paths must start with working tree directory of this index's + repository to be considered valid. For example, if it was initialized + with a non-normalized path, like ``/root/repo/../repo``, absolute paths + to be added must start with ``/root/repo/../repo``. + + Paths provided like this must exist. When added, they will be written + into the object database. + + PathStrings may contain globs, such as ``lib/__init__*``. Or they can be + directories like ``lib``, which will add all the files within the + directory and subdirectories. + + This equals a straight :manpage:`git-add(1)`. + + They are added at stage 0. + + - :class:~`git.objects.blob.Blob` or + :class:`~git.objects.submodule.base.Submodule` object + + Blobs are added as they are assuming a valid mode is set. + + The file they refer to may or may not exist in the file system, but must + be a path relative to our repository. + + If their sha is null (40*0), their path must exist in the file system + relative to the git repository as an object will be created from the + data at the path. + + The handling now very much equals the way string paths are processed, + except that the mode you have set will be kept. This allows you to + create symlinks by settings the mode respectively and writing the target + of the symlink directly into the file. This equals a default Linux + symlink which is not dereferenced automatically, except that it can be + created on filesystems not supporting it as well. + + Please note that globs or directories are not allowed in + :class:`~git.objects.blob.Blob` objects. 
+ + They are added at stage 0. + + - :class:`~git.index.typ.BaseIndexEntry` or type + + Handling equals the one of :class:~`git.objects.blob.Blob` objects, but + the stage may be explicitly set. Please note that Index Entries require + binary sha's. + + :param force: + **CURRENTLY INEFFECTIVE** + If ``True``, otherwise ignored or excluded files will be added anyway. As + opposed to the :manpage:`git-add(1)` command, we enable this flag by default + as the API user usually wants the item to be added even though they might be + excluded. + + :param fprogress: + Function with signature ``f(path, done=False, item=item)`` called for each + path to be added, one time once it is about to be added where ``done=False`` + and once after it was added where ``done=True``. + + ``item`` is set to the actual item we handle, either a path or a + :class:`~git.index.typ.BaseIndexEntry`. + + Please note that the processed path is not guaranteed to be present in the + index already as the index is currently being processed. + + :param path_rewriter: + Function, with signature ``(string) func(BaseIndexEntry)``, returning a path + for each passed entry which is the path to be actually recorded for the + object created from :attr:`entry.path `. + This allows you to write an index which is not identical to the layout of + the actual files on your hard-disk. If not ``None`` and `items` contain + plain paths, these paths will be converted to Entries beforehand and passed + to the path_rewriter. Please note that ``entry.path`` is relative to the git + repository. + + :param write: + If ``True``, the index will be written once it was altered. Otherwise the + changes only exist in memory and are not available to git commands. + + :param write_extension_data: + If ``True``, extension data will be written back to the index. 
This can lead + to issues in case it is containing the 'TREE' extension, which will cause + the :manpage:`git-commit(1)` command to write an old tree, instead of a new + one representing the now changed index. + + This doesn't matter if you use :meth:`IndexFile.commit`, which ignores the + 'TREE' extension altogether. You should set it to ``True`` if you intend to + use :meth:`IndexFile.commit` exclusively while maintaining support for + third-party extensions. Besides that, you can usually safely ignore the + built-in extensions when using GitPython on repositories that are not + handled manually at all. + + All current built-in extensions are listed here: + https://git-scm.com/docs/index-format + + :return: + List of :class:`~git.index.typ.BaseIndexEntry`\s representing the entries + just actually added. + + :raise OSError: + If a supplied path did not exist. Please note that + :class:`~git.index.typ.BaseIndexEntry` objects that do not have a null sha + will be added even if their paths do not exist. + """ + # Sort the entries into strings and Entries. + # Blobs are converted to entries automatically. + # Paths can be git-added. For everything else we use git-update-index. + paths, entries = self._preprocess_add_items(items) + entries_added: List[BaseIndexEntry] = [] + # This code needs a working tree, so we try not to run it unless required. + # That way, we are OK on a bare repository as well. + # If there are no paths, the rewriter has nothing to do either. + if paths: + entries_added.extend(self._entries_for_paths(paths, path_rewriter, fprogress, entries)) + + # HANDLE ENTRIES + if entries: + null_mode_entries = [e for e in entries if e.mode == 0] + if null_mode_entries: + raise ValueError( + "At least one Entry has a null-mode - please use index.remove to remove files for clarity" + ) + # END null mode should be remove + + # HANDLE ENTRY OBJECT CREATION + # Create objects if required, otherwise go with the existing shas. 
+ null_entries_indices = [i for i, e in enumerate(entries) if e.binsha == Object.NULL_BIN_SHA] + if null_entries_indices: + + @git_working_dir + def handle_null_entries(self: "IndexFile") -> None: + for ei in null_entries_indices: + null_entry = entries[ei] + new_entry = self._store_path(null_entry.path, fprogress) + + # Update null entry. + entries[ei] = BaseIndexEntry( + ( + null_entry.mode, + new_entry.binsha, + null_entry.stage, + null_entry.path, + ) + ) + # END for each entry index + + # END closure + + handle_null_entries(self) + # END null_entry handling + + # REWRITE PATHS + # If we have to rewrite the entries, do so now, after we have generated all + # object sha's. + if path_rewriter: + for i, e in enumerate(entries): + entries[i] = BaseIndexEntry((e.mode, e.binsha, e.stage, path_rewriter(e))) + # END for each entry + # END handle path rewriting + + # Just go through the remaining entries and provide progress info. + for i, entry in enumerate(entries): + progress_sent = i in null_entries_indices + if not progress_sent: + fprogress(entry.path, False, entry) + fprogress(entry.path, True, entry) + # END handle progress + # END for each entry + entries_added.extend(entries) + # END if there are base entries + + # FINALIZE + # Add the new entries to this instance. + for entry in entries_added: + self.entries[(entry.path, 0)] = IndexEntry.from_base(entry) + + if write: + self.write(ignore_extension_data=not write_extension_data) + # END handle write + + return entries_added + + def _items_to_rela_paths( + self, + items: Union[PathLike, Sequence[Union[PathLike, BaseIndexEntry, Blob, Submodule]]], + ) -> List[PathLike]: + """Returns a list of repo-relative paths from the given items which + may be absolute or relative paths, entries or blobs.""" + paths = [] + # If string, put in list. 
+ if isinstance(items, (str, os.PathLike)): + items = [items] + + for item in items: + if isinstance(item, (BaseIndexEntry, (Blob, Submodule))): + paths.append(self._to_relative_path(item.path)) + elif isinstance(item, (str, os.PathLike)): + paths.append(self._to_relative_path(item)) + else: + raise TypeError("Invalid item type: %r" % item) + # END for each item + return paths + + @post_clear_cache + @default_index + def remove( + self, + items: Sequence[Union[PathLike, Blob, BaseIndexEntry, "Submodule"]], + working_tree: bool = False, + **kwargs: Any, + ) -> List[str]: + R"""Remove the given items from the index and optionally from the working tree + as well. + + :param items: + Multiple types of items are supported which may be be freely mixed. + + - path string + + Remove the given path at all stages. If it is a directory, you must + specify the ``r=True`` keyword argument to remove all file entries below + it. If absolute paths are given, they will be converted to a path + relative to the git repository directory containing the working tree + + The path string may include globs, such as ``*.c``. + + - :class:~`git.objects.blob.Blob` object + + Only the path portion is used in this case. + + - :class:`~git.index.typ.BaseIndexEntry` or compatible type + + The only relevant information here is the path. The stage is ignored. + + :param working_tree: + If ``True``, the entry will also be removed from the working tree, + physically removing the respective file. This may fail if there are + uncommitted changes in it. + + :param kwargs: + Additional keyword arguments to be passed to :manpage:`git-rm(1)`, such as + ``r`` to allow recursive removal. + + :return: + List(path_string, ...) list of repository relative paths that have been + removed effectively. + + This is interesting to know in case you have provided a directory or globs. + Paths are relative to the repository. 
+ """ + args = [] + if not working_tree: + args.append("--cached") + args.append("--") + + # Preprocess paths. + paths = self._items_to_rela_paths(items) + removed_paths = self.repo.git.rm(args, paths, **kwargs).splitlines() + + # Process output to gain proper paths. + # rm 'path' + return [p[4:-1] for p in removed_paths] + + @post_clear_cache + @default_index + def move( + self, + items: Sequence[Union[PathLike, Blob, BaseIndexEntry, "Submodule"]], + skip_errors: bool = False, + **kwargs: Any, + ) -> List[Tuple[str, str]]: + """Rename/move the items, whereas the last item is considered the destination of + the move operation. + + If the destination is a file, the first item (of two) must be a file as well. + + If the destination is a directory, it may be preceded by one or more directories + or files. + + The working tree will be affected in non-bare repositories. + + :param items: + Multiple types of items are supported, please see the :meth:`remove` method + for reference. + + :param skip_errors: + If ``True``, errors such as ones resulting from missing source files will be + skipped. + + :param kwargs: + Additional arguments you would like to pass to :manpage:`git-mv(1)`, such as + ``dry_run`` or ``force``. + + :return: + List(tuple(source_path_string, destination_path_string), ...) + + A list of pairs, containing the source file moved as well as its actual + destination. Relative to the repository root. + + :raise ValueError: + If only one item was given. + + :raise git.exc.GitCommandError: + If git could not handle your request. + """ + args = [] + if skip_errors: + args.append("-k") + + paths = self._items_to_rela_paths(items) + if len(paths) < 2: + raise ValueError("Please provide at least one source and one destination of the move operation") + + was_dry_run = kwargs.pop("dry_run", kwargs.pop("n", None)) + kwargs["dry_run"] = True + + # First execute rename in dry run so the command tells us what it actually does + # (for later output). 
+ out = [] + mvlines = self.repo.git.mv(args, paths, **kwargs).splitlines() + + # Parse result - first 0:n/2 lines are 'checking ', the remaining ones are the + # 'renaming' ones which we parse. + for ln in range(int(len(mvlines) / 2), len(mvlines)): + tokens = mvlines[ln].split(" to ") + assert len(tokens) == 2, "Too many tokens in %s" % mvlines[ln] + + # [0] = Renaming x + # [1] = y + out.append((tokens[0][9:], tokens[1])) + # END for each line to parse + + # Either prepare for the real run, or output the dry-run result. + if was_dry_run: + return out + # END handle dry run + + # Now apply the actual operation. + kwargs.pop("dry_run") + self.repo.git.mv(args, paths, **kwargs) + + return out + + def commit( + self, + message: str, + parent_commits: Union[List[Commit], None] = None, + head: bool = True, + author: Union[None, "Actor"] = None, + committer: Union[None, "Actor"] = None, + author_date: Union[datetime.datetime, str, None] = None, + commit_date: Union[datetime.datetime, str, None] = None, + skip_hooks: bool = False, + ) -> Commit: + """Commit the current default index file, creating a + :class:`~git.objects.commit.Commit` object. + + For more information on the arguments, see + :meth:`Commit.create_from_tree `. + + :note: + If you have manually altered the :attr:`entries` member of this instance, + don't forget to :meth:`write` your changes to disk beforehand. + + :note: + Passing ``skip_hooks=True`` is the equivalent of using ``-n`` or + ``--no-verify`` on the command line. 
+ + :return: + :class:`~git.objects.commit.Commit` object representing the new commit + """ + if not skip_hooks: + run_commit_hook("pre-commit", self) + + self._write_commit_editmsg(message) + run_commit_hook("commit-msg", self, self._commit_editmsg_filepath()) + message = self._read_commit_editmsg() + self._remove_commit_editmsg() + tree = self.write_tree() + rval = Commit.create_from_tree( + self.repo, + tree, + message, + parent_commits, + head, + author=author, + committer=committer, + author_date=author_date, + commit_date=commit_date, + ) + if not skip_hooks: + run_commit_hook("post-commit", self) + return rval + + def _write_commit_editmsg(self, message: str) -> None: + with open(self._commit_editmsg_filepath(), "wb") as commit_editmsg_file: + commit_editmsg_file.write(message.encode(defenc)) + + def _remove_commit_editmsg(self) -> None: + os.remove(self._commit_editmsg_filepath()) + + def _read_commit_editmsg(self) -> str: + with open(self._commit_editmsg_filepath(), "rb") as commit_editmsg_file: + return commit_editmsg_file.read().decode(defenc) + + def _commit_editmsg_filepath(self) -> str: + return osp.join(self.repo.common_dir, "COMMIT_EDITMSG") + + def _flush_stdin_and_wait(cls, proc: "Popen[bytes]", ignore_stdout: bool = False) -> bytes: + stdin_IO = proc.stdin + if stdin_IO: + stdin_IO.flush() + stdin_IO.close() + + stdout = b"" + if not ignore_stdout and proc.stdout: + stdout = proc.stdout.read() + + if proc.stdout: + proc.stdout.close() + proc.wait() + return stdout + + @default_index + def checkout( + self, + paths: Union[None, Iterable[PathLike]] = None, + force: bool = False, + fprogress: Callable = lambda *args: None, + **kwargs: Any, + ) -> Union[None, Iterator[PathLike], Sequence[PathLike]]: + """Check out the given paths or all files from the version known to the index + into the working tree. + + :note: + Be sure you have written pending changes using the :meth:`write` method in + case you have altered the entries dictionary directly. 
+ + :param paths: + If ``None``, all paths in the index will be checked out. + Otherwise an iterable of relative or absolute paths or a single path + pointing to files or directories in the index is expected. + + :param force: + If ``True``, existing files will be overwritten even if they contain local + modifications. + If ``False``, these will trigger a :exc:`~git.exc.CheckoutError`. + + :param fprogress: + See :meth:`IndexFile.add` for signature and explanation. + + The provided progress information will contain ``None`` as path and item if + no explicit paths are given. Otherwise progress information will be send + prior and after a file has been checked out. + + :param kwargs: + Additional arguments to be passed to :manpage:`git-checkout-index(1)`. + + :return: + Iterable yielding paths to files which have been checked out and are + guaranteed to match the version stored in the index. + + :raise git.exc.CheckoutError: + * If at least one file failed to be checked out. This is a summary, hence it + will checkout as many files as it can anyway. + * If one of files or directories do not exist in the index (as opposed to + the original git command, which ignores them). + + :raise git.exc.GitCommandError: + If error lines could not be parsed - this truly is an exceptional state. + + :note: + The checkout is limited to checking out the files in the index. Files which + are not in the index anymore and exist in the working tree will not be + deleted. This behaviour is fundamentally different to ``head.checkout``, + i.e. if you want :manpage:`git-checkout(1)`-like behaviour, use + ``head.checkout`` instead of ``index.checkout``. + """ + args = ["--index"] + if force: + args.append("--force") + + failed_files = [] + failed_reasons = [] + unknown_lines = [] + + def handle_stderr(proc: "Popen[bytes]", iter_checked_out_files: Iterable[PathLike]) -> None: + stderr_IO = proc.stderr + if not stderr_IO: + return # Return early if stderr empty. 
+ + stderr_bytes = stderr_IO.read() + # line contents: + stderr = stderr_bytes.decode(defenc) + # git-checkout-index: this already exists + endings = ( + " already exists", + " is not in the cache", + " does not exist at stage", + " is unmerged", + ) + for line in stderr.splitlines(): + if not line.startswith("git checkout-index: ") and not line.startswith("git-checkout-index: "): + is_a_dir = " is a directory" + unlink_issue = "unable to unlink old '" + already_exists_issue = " already exists, no checkout" # created by entry.c:checkout_entry(...) + if line.endswith(is_a_dir): + failed_files.append(line[: -len(is_a_dir)]) + failed_reasons.append(is_a_dir) + elif line.startswith(unlink_issue): + failed_files.append(line[len(unlink_issue) : line.rfind("'")]) + failed_reasons.append(unlink_issue) + elif line.endswith(already_exists_issue): + failed_files.append(line[: -len(already_exists_issue)]) + failed_reasons.append(already_exists_issue) + else: + unknown_lines.append(line) + continue + # END special lines parsing + + for e in endings: + if line.endswith(e): + failed_files.append(line[20 : -len(e)]) + failed_reasons.append(e) + break + # END if ending matches + # END for each possible ending + # END for each line + if unknown_lines: + raise GitCommandError(("git-checkout-index",), 128, stderr) + if failed_files: + valid_files = list(set(iter_checked_out_files) - set(failed_files)) + raise CheckoutError( + "Some files could not be checked out from the index due to local modifications", + failed_files, + valid_files, + failed_reasons, + ) + + # END stderr handler + + if paths is None: + args.append("--all") + kwargs["as_process"] = 1 + fprogress(None, False, None) + proc = self.repo.git.checkout_index(*args, **kwargs) + proc.wait() + fprogress(None, True, None) + rval_iter = (e.path for e in self.entries.values()) + handle_stderr(proc, rval_iter) + return rval_iter + else: + if isinstance(paths, str): + paths = [paths] + + # Make sure we have our entries loaded 
before we start checkout_index, which + # will hold a lock on it. We try to get the lock as well during our entries + # initialization. + self.entries # noqa: B018 + + args.append("--stdin") + kwargs["as_process"] = True + kwargs["istream"] = subprocess.PIPE + proc = self.repo.git.checkout_index(args, **kwargs) + # FIXME: Reading from GIL! + make_exc = lambda: GitCommandError(("git-checkout-index",) + tuple(args), 128, proc.stderr.read()) + checked_out_files: List[PathLike] = [] + + for path in paths: + co_path = to_native_path_linux(self._to_relative_path(path)) + # If the item is not in the index, it could be a directory. + path_is_directory = False + + try: + self.entries[(co_path, 0)] + except KeyError: + folder = str(co_path) + if not folder.endswith("/"): + folder += "/" + for entry in self.entries.values(): + if str(entry.path).startswith(folder): + p = entry.path + self._write_path_to_stdin(proc, p, p, make_exc, fprogress, read_from_stdout=False) + checked_out_files.append(p) + path_is_directory = True + # END if entry is in directory + # END for each entry + # END path exception handlnig + + if not path_is_directory: + self._write_path_to_stdin(proc, co_path, path, make_exc, fprogress, read_from_stdout=False) + checked_out_files.append(co_path) + # END path is a file + # END for each path + try: + self._flush_stdin_and_wait(proc, ignore_stdout=True) + except GitCommandError: + # Without parsing stdout we don't know what failed. 
+ raise CheckoutError( # noqa: B904 + "Some files could not be checked out from the index, probably because they didn't exist.", + failed_files, + [], + failed_reasons, + ) + + handle_stderr(proc, checked_out_files) + return checked_out_files + # END paths handling + + @default_index + def reset( + self, + commit: Union[Commit, "Reference", str] = "HEAD", + working_tree: bool = False, + paths: Union[None, Iterable[PathLike]] = None, + head: bool = False, + **kwargs: Any, + ) -> "IndexFile": + """Reset the index to reflect the tree at the given commit. This will not adjust + our HEAD reference by default, as opposed to + :meth:`HEAD.reset `. + + :param commit: + Revision, :class:`~git.refs.reference.Reference` or + :class:`~git.objects.commit.Commit` specifying the commit we should + represent. + + If you want to specify a tree only, use :meth:`IndexFile.from_tree` and + overwrite the default index. + + :param working_tree: + If ``True``, the files in the working tree will reflect the changed index. + If ``False``, the working tree will not be touched. + Please note that changes to the working copy will be discarded without + warning! + + :param head: + If ``True``, the head will be set to the given commit. This is ``False`` by + default, but if ``True``, this method behaves like + :meth:`HEAD.reset `. + + :param paths: + If given as an iterable of absolute or repository-relative paths, only these + will be reset to their state at the given commit-ish. + The paths need to exist at the commit, otherwise an exception will be + raised. + + :param kwargs: + Additional keyword arguments passed to :manpage:`git-reset(1)`. + + :note: + :meth:`IndexFile.reset`, as opposed to + :meth:`HEAD.reset `, will not delete any files in + order to maintain a consistent working tree. Instead, it will just check out + the files according to their state in the index. + If you want :manpage:`git-reset(1)`-like behaviour, use + :meth:`HEAD.reset ` instead. 
+ + :return: + self + """ + # What we actually want to do is to merge the tree into our existing index, + # which is what git-read-tree does. + new_inst = type(self).from_tree(self.repo, commit) + if not paths: + self.entries = new_inst.entries + else: + nie = new_inst.entries + for path in paths: + path = self._to_relative_path(path) + try: + key = entry_key(path, 0) + self.entries[key] = nie[key] + except KeyError: + # If key is not in theirs, it musn't be in ours. + try: + del self.entries[key] + except KeyError: + pass + # END handle deletion keyerror + # END handle keyerror + # END for each path + # END handle paths + self.write() + + if working_tree: + self.checkout(paths=paths, force=True) + # END handle working tree + + if head: + self.repo.head.set_commit(self.repo.commit(commit), logmsg="%s: Updating HEAD" % commit) + # END handle head change + + return self + + # FIXME: This is documented to accept the same parameters as Diffable.diff, but this + # does not handle NULL_TREE for `other`. (The suppressed mypy error is about this.) + def diff( + self, + other: Union[ # type: ignore[override] + Literal[git_diff.DiffConstants.INDEX], + "Tree", + "Commit", + str, + None, + ] = git_diff.INDEX, + paths: Union[PathLike, List[PathLike], Tuple[PathLike, ...], None] = None, + create_patch: bool = False, + **kwargs: Any, + ) -> git_diff.DiffIndex: + """Diff this index against the working copy or a :class:`~git.objects.tree.Tree` + or :class:`~git.objects.commit.Commit` object. + + For documentation of the parameters and return values, see + :meth:`Diffable.diff `. + + :note: + Will only work with indices that represent the default git index as they + have not been initialized with a stream. + """ + # Only run if we are the default repository index. + if self._file_path != self._index_path(): + raise AssertionError("Cannot call %r on indices that do not represent the default git index" % self.diff()) + # Index against index is always empty. 
+ if other is self.INDEX: + return git_diff.DiffIndex() + + # Index against anything but None is a reverse diff with the respective item. + # Handle existing -R flags properly. + # Transform strings to the object so that we can call diff on it. + if isinstance(other, str): + other = self.repo.rev_parse(other) + # END object conversion + + if isinstance(other, Object): # For Tree or Commit. + # Invert the existing R flag. + cur_val = kwargs.get("R", False) + kwargs["R"] = not cur_val + return other.diff(self.INDEX, paths, create_patch, **kwargs) + # END diff against other item handling + + # If other is not None here, something is wrong. + if other is not None: + raise ValueError("other must be None, Diffable.INDEX, a Tree or Commit, was %r" % other) + + # Diff against working copy - can be handled by superclass natively. + return super().diff(other, paths, create_patch, **kwargs) diff --git a/parrot/lib/python3.10/site-packages/git/index/util.py b/parrot/lib/python3.10/site-packages/git/index/util.py new file mode 100644 index 0000000000000000000000000000000000000000..e59cb609f70a2ec532b310158f021618e7d0505d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/git/index/util.py @@ -0,0 +1,121 @@ +# This module is part of GitPython and is released under the +# 3-Clause BSD License: https://opensource.org/license/bsd-3-clause/ + +"""Index utilities.""" + +__all__ = ["TemporaryFileSwap", "post_clear_cache", "default_index", "git_working_dir"] + +import contextlib +from functools import wraps +import os +import os.path as osp +import struct +import tempfile +from types import TracebackType + +# typing ---------------------------------------------------------------------- + +from typing import Any, Callable, TYPE_CHECKING, Optional, Type + +from git.types import Literal, PathLike, _T + +if TYPE_CHECKING: + from git.index import IndexFile + +# --------------------------------------------------------------------------------- + +# { Aliases +pack = struct.pack +unpack = 
struct.unpack +# } END aliases + + +class TemporaryFileSwap: + """Utility class moving a file to a temporary location within the same directory and + moving it back on to where on object deletion.""" + + __slots__ = ("file_path", "tmp_file_path") + + def __init__(self, file_path: PathLike) -> None: + self.file_path = file_path + dirname, basename = osp.split(file_path) + fd, self.tmp_file_path = tempfile.mkstemp(prefix=basename, dir=dirname) + os.close(fd) + with contextlib.suppress(OSError): # It may be that the source does not exist. + os.replace(self.file_path, self.tmp_file_path) + + def __enter__(self) -> "TemporaryFileSwap": + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Literal[False]: + if osp.isfile(self.tmp_file_path): + os.replace(self.tmp_file_path, self.file_path) + return False + + +# { Decorators + + +def post_clear_cache(func: Callable[..., _T]) -> Callable[..., _T]: + """Decorator for functions that alter the index using the git command. + + When a git command alters the index, this invalidates our possibly existing entries + dictionary, which is why it must be deleted to allow it to be lazily reread later. + """ + + @wraps(func) + def post_clear_cache_if_not_raised(self: "IndexFile", *args: Any, **kwargs: Any) -> _T: + rval = func(self, *args, **kwargs) + self._delete_entries_cache() + return rval + + # END wrapper method + + return post_clear_cache_if_not_raised + + +def default_index(func: Callable[..., _T]) -> Callable[..., _T]: + """Decorator ensuring the wrapped method may only run if we are the default + repository index. + + This is as we rely on git commands that operate on that index only. 
+ """ + + @wraps(func) + def check_default_index(self: "IndexFile", *args: Any, **kwargs: Any) -> _T: + if self._file_path != self._index_path(): + raise AssertionError( + "Cannot call %r on indices that do not represent the default git index" % func.__name__ + ) + return func(self, *args, **kwargs) + + # END wrapper method + + return check_default_index + + +def git_working_dir(func: Callable[..., _T]) -> Callable[..., _T]: + """Decorator which changes the current working dir to the one of the git + repository in order to ensure relative paths are handled correctly.""" + + @wraps(func) + def set_git_working_dir(self: "IndexFile", *args: Any, **kwargs: Any) -> _T: + cur_wd = os.getcwd() + os.chdir(str(self.repo.working_tree_dir)) + try: + return func(self, *args, **kwargs) + finally: + os.chdir(cur_wd) + # END handle working dir + + # END wrapper + + return set_git_working_dir + + +# } END decorators diff --git a/parrot/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc b/parrot/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3c297c5efc4f18677a74dd974f98480ddbc0181 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_client.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:757403bb67f240cd69d8ba9279c1a9dc974b8549eb1372c7f8ae63b5549e88c6 +size 135486 diff --git a/parrot/lib/python3.10/site-packages/imageio/__init__.py b/parrot/lib/python3.10/site-packages/imageio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0cc19799859255660d2a9ab9777ce125dd1de8d7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/__init__.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2014-2020, imageio contributors +# imageio is distributed under the terms of the (new) BSD License. 
+ +# This docstring is used at the index of the documentation pages, and +# gets inserted into a slightly larger description (in setup.py) for +# the page on Pypi: +""" +Imageio is a Python library that provides an easy interface to read and +write a wide range of image data, including animated images, volumetric +data, and scientific formats. It is cross-platform, runs on Python 3.5+, +and is easy to install. + +Main website: https://imageio.readthedocs.io/ +""" + +# flake8: noqa + +__version__ = "2.35.1" + +import warnings + +# Load some bits from core +from .core import FormatManager, RETURN_BYTES + +# Instantiate the old format manager +formats = FormatManager() +show_formats = formats.show + +from . import v2 +from . import v3 +from . import plugins + +# import config after core to avoid circular import +from . import config + +# import all APIs into the top level (meta API) +from .v2 import ( + imread as imread_v2, + mimread, + volread, + mvolread, + imwrite, + mimwrite, + volwrite, + mvolwrite, + # aliases + get_reader as read, + get_writer as save, + imwrite as imsave, + mimwrite as mimsave, + volwrite as volsave, + mvolwrite as mvolsave, + # misc + help, + get_reader, + get_writer, +) +from .v3 import ( + imopen, + # imread, # Will take over once v3 is released + # imwrite, # Will take over once v3 is released + imiter, +) + + +def imread(uri, format=None, **kwargs): + """imread(uri, format=None, **kwargs) + + Reads an image from the specified file. Returns a numpy array, which + comes with a dict of meta data at its 'meta' attribute. + + Note that the image data is returned as-is, and may not always have + a dtype of uint8 (and thus may differ from what e.g. PIL returns). + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the image from, e.g. a filename, pathlib.Path, + http address or file object, see the docs for more info. + format : str + The format to use to read the file. 
By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the reader. See :func:`.help` + to see what arguments are available for a particular format. + """ + + warnings.warn( + "Starting with ImageIO v3 the behavior of this function will switch to that of" + " iio.v3.imread. To keep the current behavior (and make this warning disappear)" + " use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly.", + DeprecationWarning, + stacklevel=2, + ) + + return imread_v2(uri, format=format, **kwargs) + + +__all__ = [ + "v2", + "v3", + "config", + "plugins", + # v3 API + "imopen", + "imread", + "imwrite", + "imiter", + # v2 API + "mimread", + "volread", + "mvolread", + "imwrite", + "mimwrite", + "volwrite", + "mvolwrite", + # v2 aliases + "read", + "save", + "imsave", + "mimsave", + "volsave", + "mvolsave", + # functions to deprecate + "help", + "get_reader", + "get_writer", + "formats", + "show_formats", +] diff --git a/parrot/lib/python3.10/site-packages/imageio/__main__.py b/parrot/lib/python3.10/site-packages/imageio/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..ad0ea0b5dc9976351a59f34d4e720dfc5a7c5a53 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/__main__.py @@ -0,0 +1,169 @@ +""" +Console scripts and associated helper methods for imageio. +""" + +import argparse +import os +from os import path as op +import shutil +import sys + + +from . import plugins +from .core import util + +# A list of plugins that require binaries from the imageio-binaries +# repository. These plugins must implement the `download` method. +PLUGINS_WITH_BINARIES = ["freeimage"] + + +def download_bin(plugin_names=["all"], package_dir=False): + """Download binary dependencies of plugins + + This is a convenience method for downloading the binaries + (e.g. for freeimage) from the imageio-binaries + repository. 
+ + Parameters + ---------- + plugin_names: list + A list of imageio plugin names. If it contains "all", all + binary dependencies are downloaded. + package_dir: bool + If set to `True`, the binaries will be downloaded to the + `resources` directory of the imageio package instead of + to the users application data directory. Note that this + might require administrative rights if imageio is installed + in a system directory. + """ + if plugin_names.count("all"): + # Use all plugins + plugin_names = PLUGINS_WITH_BINARIES + + plugin_names.sort() + print("Ascertaining binaries for: {}.".format(", ".join(plugin_names))) + + if package_dir: + # Download the binaries to the `resources` directory + # of imageio. If imageio comes as an .egg, then a cache + # directory will be created by pkg_resources (requires setuptools). + # see `imageio.core.util.resource_dirs` + # and `imageio.core.utilresource_package_dir` + directory = util.resource_package_dir() + else: + directory = None + + for plg in plugin_names: + if plg not in PLUGINS_WITH_BINARIES: + msg = "Plugin {} not registered for binary download!".format(plg) + raise Exception(msg) + mod = getattr(plugins, plg) + mod.download(directory=directory) + + +def download_bin_main(): + """Argument-parsing wrapper for `download_bin`""" + description = "Download plugin binary dependencies" + phelp = ( + "Plugin name for which to download the binary. " + + "If no argument is given, all binaries are downloaded." + ) + dhelp = ( + "Download the binaries to the package directory " + + "(default is the users application data directory). " + + "This might require administrative rights." 
+ ) + example_text = ( + "examples:\n" + + " imageio_download_bin all\n" + + " imageio_download_bin freeimage\n" + ) + parser = argparse.ArgumentParser( + description=description, + epilog=example_text, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument("plugin", type=str, nargs="*", default="all", help=phelp) + parser.add_argument( + "--package-dir", + dest="package_dir", + action="store_true", + default=False, + help=dhelp, + ) + args = parser.parse_args() + download_bin(plugin_names=args.plugin, package_dir=args.package_dir) + + +def remove_bin(plugin_names=["all"]): + """Remove binary dependencies of plugins + + This is a convenience method that removes all binaries + dependencies for plugins downloaded by imageio. + + Notes + ----- + It only makes sense to use this method if the binaries + are corrupt. + """ + if plugin_names.count("all"): + # Use all plugins + plugin_names = PLUGINS_WITH_BINARIES + + print("Removing binaries for: {}.".format(", ".join(plugin_names))) + + rdirs = util.resource_dirs() + + for plg in plugin_names: + if plg not in PLUGINS_WITH_BINARIES: + msg = "Plugin {} not registered for binary download!".format(plg) + raise Exception(msg) + + not_removed = [] + for rd in rdirs: + # plugin name is in subdirectories + for rsub in os.listdir(rd): + if rsub in plugin_names: + plgdir = op.join(rd, rsub) + try: + shutil.rmtree(plgdir) + except Exception: + not_removed.append(plgdir) + if not_removed: + nrs = ",".join(not_removed) + msg2 = ( + "These plugins files could not be removed: {}\n".format(nrs) + + "Make sure they are not used by any program and try again." + ) + raise Exception(msg2) + + +def remove_bin_main(): + """Argument-parsing wrapper for `remove_bin`""" + description = "Remove plugin binary dependencies" + phelp = ( + "Plugin name for which to remove the binary. " + + "If no argument is given, all binaries are removed." 
+ ) + example_text = ( + "examples:\n" + + " imageio_remove_bin all\n" + + " imageio_remove_bin freeimage\n" + ) + parser = argparse.ArgumentParser( + description=description, + epilog=example_text, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument("plugin", type=str, nargs="*", default="all", help=phelp) + args = parser.parse_args() + remove_bin(plugin_names=args.plugin) + + +if __name__ == "__main__": + if len(sys.argv) > 1 and sys.argv[1] == "download_bin": + download_bin_main() + elif len(sys.argv) > 1 and sys.argv[1] == "remove_bin": + remove_bin_main() + else: + raise RuntimeError("Invalid use of the imageio CLI") diff --git a/parrot/lib/python3.10/site-packages/imageio/config/__init__.py b/parrot/lib/python3.10/site-packages/imageio/config/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ca78dd22f2e690bc5115565e9e0c11b67929031c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/config/__init__.py @@ -0,0 +1,16 @@ +from .extensions import ( + extension_list, + known_extensions, + FileExtension, + video_extensions, +) +from .plugins import known_plugins, PluginConfig + +__all__ = [ + "known_plugins", + "PluginConfig", + "extension_list", + "known_extensions", + "FileExtension", + "video_extensions", +] diff --git a/parrot/lib/python3.10/site-packages/imageio/config/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/config/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eeea566b61f169927bfbb20df63db1281d8dc51e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/config/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/config/__pycache__/extensions.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/config/__pycache__/extensions.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f43d34b384b19604bebca9738316a25be91bdcb2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/config/__pycache__/extensions.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/config/__pycache__/plugins.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/config/__pycache__/plugins.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94e4fd91e24948e8c1daeb7b04f4fc629fde3f7a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/config/__pycache__/plugins.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/config/extensions.py b/parrot/lib/python3.10/site-packages/imageio/config/extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..00c716246ad5683dfbf505c9347561ab5c13bd1c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/config/extensions.py @@ -0,0 +1,2002 @@ +""" +A set of objects representing each file extension recognized by ImageIO. If an +extension is not listed here it is still supported, as long as there exists a +supporting backend. + +""" + + +class FileExtension: + """File Extension Metadata + + This class holds information about a image file format associated with a + given extension. This information is used to track plugins that are known to + be able to handle a particular format. It also contains additional + information about a format, which is used when creating the supported format + docs. + + Plugins known to be able to handle this format are ordered by a ``priority`` + list. This list is used to determine the ideal plugin to use when choosing a + plugin based on file extension. + + Parameters + ---------- + extension : str + The name of the extension including the initial dot, e.g. ".png". + priority : List + A list of plugin names (entries in config.known_plugins) that can handle + this format. 
The position of a plugin expresses a preference, e.g. + ["plugin1", "plugin2"] indicates that, if available, plugin1 should be + preferred over plugin2 when handling a request related to this format. + name : str + The full name of the format. + description : str + A description of the format. + external_link : str + A link to further information about the format. Typically, the format's + specification. + volume_support : str + If True, the format/extension supports volumetric image data. + + Examples + -------- + >>> FileExtension( + name="Bitmap", + extension=".bmp", + priority=["pillow", "BMP-PIL", "BMP-FI", "ITK"], + external_link="https://en.wikipedia.org/wiki/BMP_file_format", + ) + + """ + + def __init__( + self, + *, + extension, + priority, + name=None, + description=None, + external_link=None, + volume_support=False + ): + self.extension = extension + self.priority = priority + self.name = name + self.description = description + self.external_link = external_link + self.default_priority = priority.copy() + self.volume_support = volume_support + + def reset(self): + self.priority = self.default_priority.copy() + + +extension_list = [ + FileExtension( + name="Hasselblad raw", + extension=".3fr", + priority=["RAW-FI"], + ), + FileExtension( + name="Sony alpha", + extension=".arw", + priority=["RAW-FI"], + ), + FileExtension( + name="Animated Portable Network Graphics", + external_link="https://en.wikipedia.org/wiki/APNG", + extension=".apng", + priority=["pillow", "pyav"], + ), + FileExtension( + name="Audio Video Interleave", + extension=".avi", + priority=["FFMPEG"], + ), + FileExtension( + name="Casio raw format", + extension=".bay", + priority=["RAW-FI"], + ), + FileExtension( + extension=".blp", + priority=["pillow"], + ), + FileExtension( + name="Bitmap", + extension=".bmp", + priority=["pillow", "BMP-PIL", "BMP-FI", "ITK", "pyav", "opencv"], + external_link="https://en.wikipedia.org/wiki/BMP_file_format", + ), + FileExtension( + 
name="Device-Independent Bitmap", + extension=".dip", + priority=["opencv"], + external_link="https://en.wikipedia.org/wiki/BMP_file_format", + ), + FileExtension( + name="Re-Volt mipmap", + extension=".bmq", + priority=["RAW-FI"], + ), + FileExtension( + name="Binary Structured Data Format", + extension=".bsdf", + priority=["BSDF"], + external_link="http://bsdf.io/", + ), + FileExtension( + name="Binary Universal Form for the Representation of meteorological data", + extension=".bufr", + priority=["pillow", "BUFR-PIL"], + ), + FileExtension( + name="Silicon Graphics Image", + extension=".bw", + priority=["pillow", "SGI-PIL", "SGI-FI"], + ), + FileExtension( + name="Scirra Construct", + extension=".cap", + priority=["RAW-FI"], + ), + FileExtension( + name="AMETEK High Speed Camera Format", + extension=".cine", + priority=["RAW-FI"], + external_link="https://phantomhighspeed-knowledge.secure.force.com/servlet/fileField?id=0BE1N000000kD2i#:~:text=Cine%20is%20a%20video%20file,camera%20model%20and%20image%20resolution", + ), + FileExtension(extension=".cr2", priority=["RAW-FI"]), + FileExtension( + extension=".crw", + priority=["RAW-FI"], + ), + FileExtension( + extension=".cs1", + priority=["RAW-FI"], + ), + FileExtension( + name="Computerized Tomography", + extension=".ct", + priority=["DICOM"], + ), + FileExtension( + name="Windows Cursor Icons", + extension=".cur", + priority=["pillow", "CUR-PIL"], + ), + FileExtension( + name="Dr. 
Halo", + extension=".cut", + priority=["CUT-FI"], + ), + FileExtension( + extension=".dc2", + priority=["RAW-FI"], + ), + FileExtension( + name="DICOM file format", + extension=".dcm", + priority=["DICOM", "ITK"], + ), + FileExtension( + extension=".dcr", + priority=["RAW-FI"], + ), + FileExtension( + name="Intel DCX", + extension=".dcx", + priority=["pillow", "DCX-PIL"], + ), + FileExtension( + name="DirectX Texture Container", + extension=".dds", + priority=["pillow", "DDS-FI", "DDS-PIL"], + ), + FileExtension( + name="Windows Bitmap", + extension=".dib", + priority=["pillow", "DIB-PIL"], + ), + FileExtension( + name="DICOM file format", + extension=".dicom", + priority=["ITK"], + ), + FileExtension( + extension=".dng", + priority=["RAW-FI"], + ), + FileExtension( + extension=".drf", + priority=["RAW-FI"], + ), + FileExtension( + extension=".dsc", + priority=["RAW-FI"], + ), + FileExtension( + name="Enhanced Compression Wavelet", + extension=".ecw", + priority=["GDAL"], + ), + FileExtension( + name="Windows Metafile", + extension=".emf", + priority=["pillow", "WMF-PIL"], + ), + FileExtension( + name="Encapsulated Postscript", + extension=".eps", + priority=["pillow", "EPS-PIL"], + ), + FileExtension( + extension=".erf", + priority=["RAW-FI"], + ), + FileExtension( + name="OpenEXR", + extension=".exr", + external_link="https://openexr.readthedocs.io/en/latest/", + priority=["EXR-FI", "pyav", "opencv"], + ), + FileExtension( + extension=".fff", + priority=["RAW-FI"], + ), + FileExtension( + name="Flexible Image Transport System File", + extension=".fit", + priority=["pillow", "FITS-PIL", "FITS"], + ), + FileExtension( + name="Flexible Image Transport System File", + extension=".fits", + priority=["pillow", "FITS-PIL", "FITS", "pyav"], + ), + FileExtension( + name="Autodesk FLC Animation", + extension=".flc", + priority=["pillow", "FLI-PIL"], + ), + FileExtension( + name="Autodesk FLI Animation", + extension=".fli", + priority=["pillow", "FLI-PIL"], + ), + 
FileExtension( + name="Kodak FlashPix", + extension=".fpx", + priority=["pillow", "FPX-PIL"], + ), + FileExtension( + name="Independence War 2: Edge Of Chaos Texture Format", + extension=".ftc", + priority=["pillow", "FTEX-PIL"], + ), + FileExtension( + name="Flexible Image Transport System File", + extension=".fts", + priority=["FITS"], + ), + FileExtension( + name="Independence War 2: Edge Of Chaos Texture Format", + extension=".ftu", + priority=["pillow", "FTEX-PIL"], + ), + FileExtension( + name="Flexible Image Transport System File", + extension=".fz", + priority=["FITS"], + ), + FileExtension( + name="Raw fax format CCITT G.3", + extension=".g3", + priority=["G3-FI"], + ), + FileExtension( + name="GIMP brush file", + extension=".gbr", + priority=["pillow", "GBR-PIL"], + ), + FileExtension( + name="Grassroots DICOM", + extension=".gdcm", + priority=["ITK"], + ), + FileExtension( + name="Graphics Interchange Format", + extension=".gif", + priority=["pillow", "GIF-PIL", "pyav"], + ), + FileExtension( + name="UMDS GIPL", + extension=".gipl", + priority=["ITK"], + ), + FileExtension( + name="gridded meteorological data", + extension=".grib", + priority=["pillow", "GRIB-PIL"], + ), + FileExtension( + name="Hierarchical Data Format 5", + extension=".h5", + priority=["pillow", "HDF5-PIL"], + ), + FileExtension( + name="Hierarchical Data Format 5", + extension=".hdf", + priority=["pillow", "HDF5-PIL"], + ), + FileExtension( + name="Hierarchical Data Format 5", + extension=".hdf5", + priority=["ITK"], + ), + FileExtension( + name="JPEG Extended Range", + extension=".hdp", + priority=["JPEG-XR-FI"], + ), + FileExtension( + name="High Dynamic Range Image", + extension=".hdr", + priority=["HDR-FI", "ITK", "opencv"], + ), + FileExtension( + extension=".ia", + priority=["RAW-FI"], + ), + FileExtension( + extension=".icb", + priority=["pillow"], + ), + FileExtension( + name="Mac OS Icon File", + extension=".icns", + priority=["pillow", "ICNS-PIL"], + ), + FileExtension( + 
name="Windows Icon File", + extension=".ico", + priority=["pillow", "ICO-FI", "ICO-PIL", "pyav"], + ), + FileExtension( + name="ILBM Interleaved Bitmap", + extension=".iff", + priority=["IFF-FI"], + ), + FileExtension( + name="IPTC/NAA", + extension=".iim", + priority=["pillow", "IPTC-PIL"], + ), + FileExtension( + extension=".iiq", + priority=["RAW-FI"], + ), + FileExtension( + name="IFUNC Image Memory", + extension=".im", + priority=["pillow", "IM-PIL"], + ), + FileExtension( + extension=".img", + priority=["ITK", "GDAL"], + ), + FileExtension( + extension=".img.gz", + priority=["ITK"], + ), + FileExtension( + name="IM Tools", + extension=".IMT", + priority=["pillow", "IMT-PIL"], + ), + FileExtension( + name="Image Processing Lab", + extension=".ipl", + priority=["ITK"], + ), + FileExtension( + name="JPEG 2000", + extension=".j2c", + priority=["pillow", "J2K-FI", "JPEG2000-PIL", "pyav"], + ), + FileExtension( + name="JPEG 2000", + extension=".j2k", + priority=["pillow", "J2K-FI", "JPEG2000-PIL", "pyav"], + ), + FileExtension( + name="JPEG", + extension=".jfif", + priority=["pillow", "JPEG-PIL"], + ), + FileExtension( + name="JPEG", + extension=".jif", + priority=["JPEG-FI"], + ), + FileExtension( + name="JPEG Network Graphics", + extension=".jng", + priority=["JNG-FI"], + ), + FileExtension( + name="JPEG 2000", + extension=".jp2", + priority=["pillow", "JP2-FI", "JPEG2000-PIL", "pyav", "opencv"], + ), + FileExtension( + name="JPEG 2000", + extension=".jpc", + priority=["pillow", "JPEG2000-PIL"], + ), + FileExtension( + name="JPEG", + extension=".jpe", + priority=["pillow", "JPEG-FI", "JPEG-PIL", "opencv"], + ), + FileExtension( + name="Joint Photographic Experts Group", + extension=".jpeg", + priority=["pillow", "JPEG-PIL", "JPEG-FI", "ITK", "GDAL", "pyav", "opencv"], + ), + FileExtension( + name="JPEG 2000", + extension=".jpf", + priority=["pillow", "JPEG2000-PIL"], + ), + FileExtension( + name="Joint Photographic Experts Group", + extension=".jpg", + 
priority=["pillow", "JPEG-PIL", "JPEG-FI", "ITK", "GDAL", "pyav", "opencv"], + ), + FileExtension( + name="JPEG 2000", + extension=".jpx", + priority=["pillow", "JPEG2000-PIL"], + ), + FileExtension( + name="JPEG Extended Range", + extension=".jxr", + priority=["JPEG-XR-FI"], + ), + FileExtension( + extension=".k25", + priority=["RAW-FI"], + ), + FileExtension( + extension=".kc2", + priority=["RAW-FI"], + ), + FileExtension( + extension=".kdc", + priority=["RAW-FI"], + ), + FileExtension( + name="C64 Koala Graphics", + extension=".koa", + priority=["KOALA-FI"], + ), + FileExtension( + name="ILBM Interleaved Bitmap", + extension=".lbm", + priority=["IFF-FI"], + ), + FileExtension( + name="Lytro F01", + extension=".lfp", + priority=["LYTRO-LFP"], + ), + FileExtension( + name="Lytro Illum", + extension=".lfr", + priority=["LYTRO-LFR"], + ), + FileExtension( + name="ZEISS LSM", + extension=".lsm", + priority=["tifffile", "ITK", "TIFF"], + ), + FileExtension( + name="McIdas area file", + extension=".MCIDAS", + priority=["pillow", "MCIDAS-PIL"], + external_link="https://www.ssec.wisc.edu/mcidas/doc/prog_man/2003print/progman2003-formats.html", + ), + FileExtension( + extension=".mdc", + priority=["RAW-FI"], + ), + FileExtension( + extension=".mef", + priority=["RAW-FI"], + ), + FileExtension( + name="FreeSurfer File Format", + extension=".mgh", + priority=["ITK"], + ), + FileExtension( + name="ITK MetaImage", + extension=".mha", + priority=["ITK"], + ), + FileExtension( + name="ITK MetaImage Header", + extension=".mhd", + priority=["ITK"], + ), + FileExtension( + name="Microsoft Image Composer", + extension=".mic", + priority=["pillow", "MIC-PIL"], + ), + FileExtension( + name="Matroska Multimedia Container", + extension=".mkv", + priority=["FFMPEG", "pyav"], + ), + FileExtension( + name="Medical Imaging NetCDF", + extension=".mnc", + priority=["ITK"], + ), + FileExtension( + name="Medical Imaging NetCDF 2", + extension=".mnc2", + priority=["ITK"], + ), + FileExtension( 
+ name="Leaf Raw Image Format", + extension=".mos", + priority=["RAW-FI"], + ), + FileExtension( + name="QuickTime File Format", + extension=".mov", + priority=["FFMPEG", "pyav"], + ), + FileExtension( + name="MPEG-4 Part 14", + extension=".mp4", + priority=["FFMPEG", "pyav"], + ), + FileExtension( + name="MPEG-1 Moving Picture Experts Group", + extension=".mpeg", + priority=["FFMPEG", "pyav"], + ), + FileExtension( + name="Moving Picture Experts Group", + extension=".mpg", + priority=["pillow", "FFMPEG", "pyav"], + ), + FileExtension( + name="JPEG Multi-Picture Format", + extension=".mpo", + priority=["pillow", "MPO-PIL"], + ), + FileExtension( + name="Magnetic resonance imaging", + extension=".mri", + priority=["DICOM"], + ), + FileExtension( + extension=".mrw", + priority=["RAW-FI"], + ), + FileExtension( + name="Windows Paint", + extension=".msp", + priority=["pillow", "MSP-PIL"], + ), + FileExtension( + extension=".nef", + priority=["RAW-FI", "rawpy"], + ), + FileExtension( + extension=".nhdr", + priority=["ITK"], + ), + FileExtension( + extension=".nia", + priority=["ITK"], + ), + FileExtension( + extension=".nii", + priority=["ITK"], + ), + FileExtension( + name="nii.gz", + extension=".nii.gz", + priority=["ITK"], + ), + FileExtension( + name="Numpy Array", + extension=".npz", + priority=["NPZ"], + volume_support=True, + ), + FileExtension( + extension=".nrrd", + priority=["ITK"], + ), + FileExtension( + extension=".nrw", + priority=["RAW-FI"], + ), + FileExtension( + extension=".orf", + priority=["RAW-FI"], + ), + FileExtension( + extension=".palm", + priority=["pillow"], + ), + FileExtension( + name="Portable Bitmap", + extension=".pbm", + priority=["PGM-FI", "PGMRAW-FI", "pyav", "opencv"], + ), + FileExtension( + name="Kodak PhotoCD", + extension=".pcd", + priority=["pillow", "PCD-FI", "PCD-PIL"], + ), + FileExtension( + name="Macintosh PICT", + extension=".pct", + priority=["PICT-FI"], + ), + FileExtension( + name="Zsoft Paintbrush", + extension=".PCX", 
+ priority=["pillow", "PCX-FI", "PCX-PIL"], + ), + FileExtension( + extension=".pdf", + priority=["pillow"], + ), + FileExtension( + extension=".pef", + priority=["RAW-FI"], + ), + FileExtension( + extension=".pfm", + priority=["PFM-FI", "pyav", "opencv"], + ), + FileExtension( + name="Portable Greymap", + extension=".pgm", + priority=["pillow", "PGM-FI", "PGMRAW-FI", "pyav", "opencv"], + ), + FileExtension( + name="Macintosh PICT", + extension=".pic", + priority=["PICT-FI", "ITK", "opencv"], + ), + FileExtension( + name="Macintosh PICT", + extension=".pict", + priority=["PICT-FI"], + ), + FileExtension( + name="Portable Network Graphics", + extension=".png", + priority=["pillow", "PNG-PIL", "PNG-FI", "ITK", "pyav", "opencv"], + ), + FileExtension( + name="Portable Image Format", + extension=".pnm", + priority=["pillow", "opencv"], + ), + FileExtension( + name="Pbmplus image", + extension=".ppm", + priority=["pillow", "PPM-PIL", "pyav"], + ), + FileExtension( + name="Pbmplus image", + extension=".pbm", + priority=["pillow", "PPM-PIL", "PPM-FI"], + ), + FileExtension( + name="Portable image format", + extension=".pxm", + priority=["opencv"], + ), + FileExtension( + name="Portable Pixelmap (ASCII)", + extension=".ppm", + priority=["PPM-FI", "opencv"], + ), + FileExtension( + name="Portable Pixelmap (Raw)", + extension=".ppm", + priority=["PPMRAW-FI"], + ), + FileExtension( + name="Ghostscript", + extension=".ps", + priority=["pillow", "EPS-PIL"], + ), + FileExtension( + name="Adope Photoshop 2.5 and 3.0", + extension=".psd", + priority=["pillow", "PSD-PIL", "PSD-FI"], + ), + FileExtension( + extension=".ptx", + priority=["RAW-FI"], + ), + FileExtension( + extension=".pxn", + priority=["RAW-FI"], + ), + FileExtension( + name="PIXAR raster image", + extension=".pxr", + priority=["pillow", "PIXAR-PIL"], + ), + FileExtension( + extension=".qtk", + priority=["RAW-FI"], + ), + FileExtension( + extension=".raf", + priority=["RAW-FI"], + ), + FileExtension( + name="Sun 
Raster File", + extension=".ras", + priority=["pillow", "SUN-PIL", "RAS-FI", "pyav", "opencv"], + ), + FileExtension( + name="Sun Raster File", + extension=".sr", + priority=["opencv"], + ), + FileExtension( + extension=".raw", + priority=["RAW-FI", "LYTRO-ILLUM-RAW", "LYTRO-F01-RAW", "rawpy"], + ), + FileExtension( + extension=".rdc", + priority=["RAW-FI"], + ), + FileExtension( + name="Silicon Graphics Image", + extension=".rgb", + priority=["pillow", "SGI-PIL"], + ), + FileExtension( + name="Silicon Graphics Image", + extension=".rgba", + priority=["pillow", "SGI-PIL"], + ), + FileExtension( + extension=".rw2", + priority=["RAW-FI"], + ), + FileExtension( + extension=".rwl", + priority=["RAW-FI"], + ), + FileExtension( + extension=".rwz", + priority=["RAW-FI"], + ), + FileExtension( + name="Silicon Graphics Image", + extension=".sgi", + priority=["pillow", "SGI-PIL", "pyav"], + ), + FileExtension( + name="SPE File Format", + extension=".spe", + priority=["SPE"], + ), + FileExtension( + extension=".SPIDER", + priority=["pillow", "SPIDER-PIL"], + ), + FileExtension( + extension=".sr2", + priority=["RAW-FI"], + ), + FileExtension( + extension=".srf", + priority=["RAW-FI"], + ), + FileExtension( + extension=".srw", + priority=["RAW-FI"], + ), + FileExtension( + extension=".sti", + priority=["RAW-FI"], + ), + FileExtension( + extension=".stk", + priority=["tifffile", "TIFF"], + ), + FileExtension( + name="ShockWave Flash", + extension=".swf", + priority=["SWF", "pyav"], + ), + FileExtension( + name="Truevision TGA", + extension=".targa", + priority=["pillow", "TARGA-FI"], + ), + FileExtension( + name="Truevision TGA", + extension=".tga", + priority=["pillow", "TGA-PIL", "TARGA-FI", "pyav"], + ), + FileExtension( + name="Tagged Image File", + extension=".tif", + priority=[ + "tifffile", + "TIFF", + "pillow", + "TIFF-PIL", + "TIFF-FI", + "FEI", + "ITK", + "GDAL", + "pyav", + "opencv", + ], + volume_support=True, + ), + FileExtension( + name="Tagged Image File Format", 
+ extension=".tiff", + priority=[ + "tifffile", + "TIFF", + "pillow", + "TIFF-PIL", + "TIFF-FI", + "FEI", + "ITK", + "GDAL", + "pyav", + "opencv", + ], + volume_support=True, + ), + FileExtension( + extension=".vda", + priority=["pillow"], + ), + FileExtension( + extension=".vst", + priority=["pillow"], + ), + FileExtension( + extension=".vtk", + priority=["ITK"], + ), + FileExtension( + name="Wireless Bitmap", + extension=".wap", + priority=["WBMP-FI"], + ), + FileExtension( + name="Wireless Bitmap", + extension=".wbm", + priority=["WBMP-FI"], + ), + FileExtension( + name="Wireless Bitmap", + extension=".wbmp", + priority=["WBMP-FI"], + ), + FileExtension( + name="JPEG Extended Range", + extension=".wdp", + priority=["JPEG-XR-FI"], + ), + FileExtension( + name="Matroska", + extension=".webm", + priority=["FFMPEG", "pyav"], + ), + FileExtension( + name="Google WebP", + extension=".webp", + priority=["pillow", "WEBP-FI", "pyav", "opencv"], + ), + FileExtension( + name="Windows Meta File", + extension=".wmf", + priority=["pillow", "WMF-PIL"], + ), + FileExtension( + name="Windows Media Video", + extension=".wmv", + priority=["FFMPEG"], + ), + FileExtension( + name="X11 Bitmap", + extension=".xbm", + priority=["pillow", "XBM-PIL", "XBM-FI", "pyav"], + ), + FileExtension( + name="X11 Pixel Map", + extension=".xpm", + priority=["pillow", "XPM-PIL", "XPM-FI"], + ), + FileExtension( + name="Thumbnail Image", + extension=".XVTHUMB", + priority=["pillow", "XVTHUMB-PIL"], + ), + FileExtension( + extension=".dpx", + priority=["pyav"], + ), + FileExtension( + extension=".im1", + priority=["pyav"], + ), + FileExtension( + extension=".im24", + priority=["pyav"], + ), + FileExtension( + extension=".im8", + priority=["pyav"], + ), + FileExtension( + extension=".jls", + priority=["pyav"], + ), + FileExtension( + extension=".ljpg", + priority=["pyav"], + ), + FileExtension( + extension=".pam", + priority=["pyav"], + ), + FileExtension( + extension=".pcx", + priority=["pyav"], + ), + 
FileExtension( + extension=".pgmyuv", + priority=["pyav"], + ), + FileExtension( + extension=".pix", + priority=["pyav"], + ), + FileExtension( + extension=".ppm", + priority=["pyav"], + ), + FileExtension( + extension=".rs", + priority=["pyav"], + ), + FileExtension( + extension=".sun", + priority=["pyav"], + ), + FileExtension( + extension=".sunras", + priority=["pyav"], + ), + FileExtension( + extension=".xface", + priority=["pyav"], + ), + FileExtension( + extension=".xwd", + priority=["pyav"], + ), + FileExtension( + extension=".y", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".3g2", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".3gp", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".f4v", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".ism", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".isma", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".ismv", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".m4a", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".m4b", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".mj2", + priority=["pyav"], + ), + FileExtension( + name="3GP (3GPP file format)", + extension=".psp", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".3g2", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".3gp", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".f4v", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".ism", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".isma", + 
priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".ismv", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".m4a", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".m4b", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".mj2", + priority=["pyav"], + ), + FileExtension( + name="3GP2 (3GPP2 file format)", + extension=".psp", + priority=["pyav"], + ), + FileExtension( + name="3GPP AMR", + extension=".amr", + priority=["pyav"], + ), + FileExtension( + name="a64 - video for Commodore 64", + extension=".A64", + priority=["pyav"], + ), + FileExtension( + name="a64 - video for Commodore 64", + extension=".a64", + priority=["pyav"], + ), + FileExtension( + name="Adobe Filmstrip", + extension=".flm", + priority=["pyav"], + ), + FileExtension( + name="AMV", + extension=".amv", + priority=["pyav"], + ), + FileExtension( + name="ASF (Advanced / Active Streaming Format)", + extension=".asf", + priority=["pyav"], + ), + FileExtension( + name="ASF (Advanced / Active Streaming Format)", + extension=".asf", + priority=["pyav"], + ), + FileExtension( + name="ASF (Advanced / Active Streaming Format)", + extension=".wmv", + priority=["pyav"], + ), + FileExtension( + name="ASF (Advanced / Active Streaming Format)", + extension=".wmv", + priority=["pyav"], + ), + FileExtension( + name="AV1 Annex B", + extension=".obu", + priority=["pyav"], + ), + FileExtension( + name="AV1 low overhead OBU", + extension=".obu", + priority=["pyav"], + ), + FileExtension( + name="AVI (Audio Video Interleaved)", + extension=".avi", + priority=["pyav"], + ), + FileExtension( + name="AVR (Audio Visual Research)", + extension=".avr", + priority=["pyav"], + ), + FileExtension( + name="Beam Software SIFF", + extension=".vb", + priority=["pyav"], + ), + FileExtension( + name="CD Graphics", + extension=".cdg", + priority=["pyav"], + ), + FileExtension( 
+ name="Commodore CDXL video", + extension=".cdxl", + priority=["pyav"], + ), + FileExtension( + name="Commodore CDXL video", + extension=".xl", + priority=["pyav"], + ), + FileExtension( + name="DASH Muxer", + extension=".mpd", + priority=["pyav"], + ), + FileExtension( + name="Digital Pictures SGA", + extension=".sga", + priority=["pyav"], + ), + FileExtension( + name="Discworld II BMV", + extension=".bmv", + priority=["pyav"], + ), + FileExtension( + name="DV (Digital Video)", + extension=".dif", + priority=["pyav"], + ), + FileExtension( + name="DV (Digital Video)", + extension=".dv", + priority=["pyav"], + ), + FileExtension( + name="F4V Adobe Flash Video", + extension=".f4v", + priority=["pyav"], + ), + FileExtension( + name="FLV (Flash Video)", + extension=".flv", + priority=["pyav"], + ), + FileExtension( + name="GXF (General eXchange Format)", + extension=".gxf", + priority=["pyav"], + ), + FileExtension( + name="iCE Draw File", + extension=".idf", + priority=["pyav"], + ), + FileExtension( + name="IFV CCTV DVR", + extension=".ifv", + priority=["pyav"], + ), + FileExtension( + name="iPod H.264 MP4 (MPEG-4 Part 14)", + extension=".m4a", + priority=["pyav"], + ), + FileExtension( + name="iPod H.264 MP4 (MPEG-4 Part 14)", + extension=".m4b", + priority=["pyav"], + ), + FileExtension( + name="iPod H.264 MP4 (MPEG-4 Part 14)", + extension=".m4v", + priority=["pyav"], + ), + FileExtension( + name="IVR (Internet Video Recording)", + extension=".ivr", + priority=["pyav"], + ), + FileExtension( + name="Konami PS2 SVAG", + extension=".svag", + priority=["pyav"], + ), + FileExtension( + name="KUX (YouKu)", + extension=".kux", + priority=["pyav"], + ), + FileExtension( + name="live RTMP FLV (Flash Video)", + extension=".flv", + priority=["pyav"], + ), + FileExtension( + name="Loki SDL MJPEG", + extension=".mjpg", + priority=["pyav"], + ), + FileExtension( + name="LVF", + extension=".lvf", + priority=["pyav"], + ), + FileExtension( + name="Matroska / WebM", + 
extension=".mk3d", + priority=["pyav"], + ), + FileExtension( + name="Matroska / WebM", + extension=".mka", + priority=["pyav"], + ), + FileExtension( + name="Matroska / WebM", + extension=".mks", + priority=["pyav"], + ), + FileExtension( + name="Microsoft XMV", + extension=".xmv", + priority=["pyav"], + ), + FileExtension( + name="MIME multipart JPEG", + extension=".mjpg", + priority=["pyav"], + ), + FileExtension( + name="MobiClip MODS", + extension=".mods", + priority=["pyav"], + ), + FileExtension( + name="MobiClip MOFLEX", + extension=".moflex", + priority=["pyav"], + ), + FileExtension( + name="Motion Pixels MVI", + extension=".mvi", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".3g2", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".3gp", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".f4v", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".ism", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".isma", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".ismv", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".m4a", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".m4b", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".mj2", + priority=["pyav"], + ), + FileExtension( + name="MP4 (MPEG-4 Part 14)", + extension=".psp", + priority=["pyav"], + ), + FileExtension( + name="MPEG-2 PS (DVD VOB)", + extension=".dvd", + priority=["pyav"], + ), + FileExtension( + name="MPEG-2 PS (SVCD)", + extension=".vob", + priority=["pyav"], + ), + FileExtension( + name="MPEG-2 PS (VOB)", + extension=".vob", + priority=["pyav"], + ), + FileExtension( + name="MPEG-TS (MPEG-2 Transport Stream)", + extension=".m2t", + priority=["pyav"], + ), + 
FileExtension( + name="MPEG-TS (MPEG-2 Transport Stream)", + extension=".m2ts", + priority=["pyav"], + ), + FileExtension( + name="MPEG-TS (MPEG-2 Transport Stream)", + extension=".mts", + priority=["pyav"], + ), + FileExtension( + name="MPEG-TS (MPEG-2 Transport Stream)", + extension=".ts", + priority=["pyav"], + ), + FileExtension( + name="Musepack", + extension=".mpc", + priority=["pyav"], + ), + FileExtension( + name="MXF (Material eXchange Format) Operational Pattern Atom", + extension=".mxf", + priority=["pyav"], + ), + FileExtension( + name="MXF (Material eXchange Format)", + extension=".mxf", + priority=["pyav"], + ), + FileExtension( + name="MxPEG clip", + extension=".mxg", + priority=["pyav"], + ), + FileExtension( + name="NC camera feed", + extension=".v", + priority=["pyav"], + ), + FileExtension( + name="NUT", + extension=".nut", + priority=["pyav"], + ), + FileExtension( + name="Ogg Video", + extension=".ogv", + priority=["pyav"], + ), + FileExtension( + name="Ogg", + extension=".ogg", + priority=["pyav"], + ), + FileExtension( + name="On2 IVF", + extension=".ivf", + priority=["pyav"], + ), + FileExtension( + name="PSP MP4 (MPEG-4 Part 14)", + extension=".psp", + priority=["pyav"], + ), + FileExtension( + name="Psygnosis YOP", + extension=".yop", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".3g2", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".3gp", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".f4v", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".ism", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".isma", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".ismv", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".m4a", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".m4b", + 
priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".mj2", + priority=["pyav"], + ), + FileExtension( + name="QuickTime / MOV", + extension=".psp", + priority=["pyav"], + ), + FileExtension( + name="raw AVS2-P2/IEEE1857.4 video", + extension=".avs", + priority=["pyav"], + ), + FileExtension( + name="raw AVS2-P2/IEEE1857.4 video", + extension=".avs2", + priority=["pyav"], + ), + FileExtension( + name="raw AVS3-P2/IEEE1857.10", + extension=".avs3", + priority=["pyav"], + ), + FileExtension( + name="raw Chinese AVS (Audio Video Standard) video", + extension=".cavs", + priority=["pyav"], + ), + FileExtension( + name="raw Dirac", + extension=".drc", + priority=["pyav"], + ), + FileExtension( + name="raw Dirac", + extension=".vc2", + priority=["pyav"], + ), + FileExtension( + name="raw DNxHD (SMPTE VC-3)", + extension=".dnxhd", + priority=["pyav"], + ), + FileExtension( + name="raw DNxHD (SMPTE VC-3)", + extension=".dnxhr", + priority=["pyav"], + ), + FileExtension( + name="raw GSM", + extension=".gsm", + priority=["pyav"], + ), + FileExtension( + name="raw H.261", + extension=".h261", + priority=["pyav"], + ), + FileExtension( + name="raw H.263", + extension=".h263", + priority=["pyav"], + ), + FileExtension( + name="raw H.264 video", + extension=".264", + priority=["pyav"], + ), + FileExtension( + name="raw H.264 video", + extension=".avc", + priority=["pyav"], + ), + FileExtension( + name="raw H.264 video", + extension=".h264", + priority=["pyav", "FFMPEG"], + ), + FileExtension( + name="raw H.264 video", + extension=".h26l", + priority=["pyav"], + ), + FileExtension( + name="raw HEVC video", + extension=".265", + priority=["pyav"], + ), + FileExtension( + name="raw HEVC video", + extension=".h265", + priority=["pyav"], + ), + FileExtension( + name="raw HEVC video", + extension=".hevc", + priority=["pyav"], + ), + FileExtension( + name="raw id RoQ", + extension=".roq", + priority=["pyav"], + ), + FileExtension( + name="raw Ingenient MJPEG", 
+ extension=".cgi", + priority=["pyav"], + ), + FileExtension( + name="raw IPU Video", + extension=".ipu", + priority=["pyav"], + ), + FileExtension( + name="raw MJPEG 2000 video", + extension=".j2k", + priority=["pyav"], + ), + FileExtension( + name="raw MJPEG video", + extension=".mjpeg", + priority=["pyav"], + ), + FileExtension( + name="raw MJPEG video", + extension=".mjpg", + priority=["pyav"], + ), + FileExtension( + name="raw MJPEG video", + extension=".mpo", + priority=["pyav"], + ), + FileExtension( + name="raw MPEG-1 video", + extension=".m1v", + priority=["pyav"], + ), + FileExtension( + name="raw MPEG-1 video", + extension=".mpeg", + priority=["pyav"], + ), + FileExtension( + name="raw MPEG-1 video", + extension=".mpg", + priority=["pyav"], + ), + FileExtension( + name="raw MPEG-2 video", + extension=".m2v", + priority=["pyav"], + ), + FileExtension( + name="raw MPEG-4 video", + extension=".m4v", + priority=["pyav"], + ), + FileExtension( + name="raw VC-1 video", + extension=".vc1", + priority=["pyav"], + ), + FileExtension( + name="raw video", + extension=".cif", + priority=["pyav"], + ), + FileExtension( + name="raw video", + extension=".qcif", + priority=["pyav"], + ), + FileExtension( + name="raw video", + extension=".rgb", + priority=["pyav"], + ), + FileExtension( + name="raw video", + extension=".yuv", + priority=["pyav"], + ), + FileExtension( + name="RealMedia", + extension=".rm", + priority=["pyav"], + ), + FileExtension( + name="SDR2", + extension=".sdr2", + priority=["pyav"], + ), + FileExtension( + name="Sega FILM / CPK", + extension=".cpk", + priority=["pyav"], + ), + FileExtension( + name="SER (Simple uncompressed video format for astronomical capturing)", + extension=".ser", + priority=["pyav"], + ), + FileExtension( + name="Simbiosis Interactive IMX", + extension=".imx", + priority=["pyav"], + ), + FileExtension( + name="Square SVS", + extension=".svs", + priority=["tifffile", "pyav"], + ), + FileExtension( + name="TiVo TY Stream", + 
extension=".ty", + priority=["pyav"], + ), + FileExtension( + name="TiVo TY Stream", + extension=".ty+", + priority=["pyav"], + ), + FileExtension( + name="Uncompressed 4:2:2 10-bit", + extension=".v210", + priority=["pyav"], + ), + FileExtension( + name="Uncompressed 4:2:2 10-bit", + extension=".yuv10", + priority=["pyav"], + ), + FileExtension( + name="VC-1 test bitstream", + extension=".rcv", + priority=["pyav"], + ), + FileExtension( + name="Video CCTV DAT", + extension=".dat", + priority=["pyav"], + ), + FileExtension( + name="Video DAV", + extension=".dav", + priority=["pyav"], + ), + FileExtension( + name="Vivo", + extension=".viv", + priority=["pyav"], + ), + FileExtension( + name="WebM Chunk Muxer", + extension=".chk", + priority=["pyav"], + ), + FileExtension( + name="WebM", + extension=".mk3d", + priority=["pyav"], + ), + FileExtension( + name="WebM", + extension=".mka", + priority=["pyav"], + ), + FileExtension( + name="WebM", + extension=".mks", + priority=["pyav"], + ), + FileExtension( + name="Windows Television (WTV)", + extension=".wtv", + priority=["pyav"], + ), + FileExtension( + name="Xilam DERF", + extension=".adp", + priority=["pyav"], + ), + FileExtension( + name="YUV4MPEG pipe", + extension=".y4m", + priority=["pyav"], + ), + FileExtension( + extension=".qpi", + priority=["tifffile"], + ), + FileExtension( + name="PCO Camera", + extension=".pcoraw", + priority=["tifffile"], + ), + FileExtension( + name="PCO Camera", + extension=".rec", + priority=["tifffile"], + ), + FileExtension( + name="Perkin Elmer Vectra", + extension=".qptiff", + priority=["tifffile"], + ), + FileExtension( + name="Pyramid Encoded TIFF", + extension=".ptiff", + priority=["tifffile"], + ), + FileExtension( + name="Pyramid Encoded TIFF", + extension=".ptif", + priority=["tifffile"], + ), + FileExtension( + name="Opticks Gel", + extension=".gel", + priority=["tifffile"], + ), + FileExtension( + name="Zoomify Image Format", + extension=".zif", + priority=["tifffile"], + ), 
+ FileExtension( + name="Hamamatsu Slide Scanner", + extension=".ndpi", + priority=["tifffile"], + ), + FileExtension( + name="Roche Digital Pathology", + extension=".bif", + priority=["tifffile"], + ), + FileExtension( + extension=".tf8", + priority=["tifffile"], + ), + FileExtension( + extension=".btf", + priority=["tifffile"], + ), + FileExtension( + name="High Efficiency Image File Format", + extension=".heic", + priority=["pillow"], + ), + FileExtension( + name="AV1 Image File Format", + extension=".avif", + priority=["pillow"], + ), +] +extension_list.sort(key=lambda x: x.extension) + + +known_extensions = dict() +for ext in extension_list: + if ext.extension not in known_extensions: + known_extensions[ext.extension] = list() + known_extensions[ext.extension].append(ext) + +extension_list = [ext for ext_list in known_extensions.values() for ext in ext_list] + +_video_extension_strings = [ + ".264", + ".265", + ".3g2", + ".3gp", + ".a64", + ".A64", + ".adp", + ".amr", + ".amv", + ".asf", + ".avc", + ".avi", + ".avr", + ".avs", + ".avs2", + ".avs3", + ".bmv", + ".cavs", + ".cdg", + ".cdxl", + ".cgi", + ".chk", + ".cif", + ".cpk", + ".dat", + ".dav", + ".dif", + ".dnxhd", + ".dnxhr", + ".drc", + ".dv", + ".dvd", + ".f4v", + ".flm", + ".flv", + ".gsm", + ".gxf", + ".h261", + ".h263", + ".h264", + ".h265", + ".h26l", + ".hevc", + ".idf", + ".ifv", + ".imx", + ".ipu", + ".ism", + ".isma", + ".ismv", + ".ivf", + ".ivr", + ".j2k", + ".kux", + ".lvf", + ".m1v", + ".m2t", + ".m2ts", + ".m2v", + ".m4a", + ".m4b", + ".m4v", + ".mj2", + ".mjpeg", + ".mjpg", + ".mk3d", + ".mka", + ".mks", + ".mkv", + ".mods", + ".moflex", + ".mov", + ".mp4", + ".mpc", + ".mpd", + ".mpeg", + ".mpg", + ".mpo", + ".mts", + ".mvi", + ".mxf", + ".mxg", + ".nut", + ".obu", + ".ogg", + ".ogv", + ".psp", + ".qcif", + ".rcv", + ".rgb", + ".rm", + ".roq", + ".sdr2", + ".ser", + ".sga", + ".svag", + ".svs", + ".ts", + ".ty", + ".ty+", + ".v", + ".v210", + ".vb", + ".vc1", + ".vc2", + ".viv", + 
".vob", + ".webm", + ".wmv", + ".wtv", + ".xl", + ".xmv", + ".y4m", + ".yop", + ".yuv", + ".yuv10", +] +video_extensions = list() +for ext_string in _video_extension_strings: + formats = known_extensions[ext_string] + video_extensions.append(formats[0]) +video_extensions.sort(key=lambda x: x.extension) diff --git a/parrot/lib/python3.10/site-packages/imageio/config/extensions.pyi b/parrot/lib/python3.10/site-packages/imageio/config/extensions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..266d0632748d528b6e730a25624919cb7805de6e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/config/extensions.pyi @@ -0,0 +1,24 @@ +from typing import List, Dict, Optional + +class FileExtension: + extension: str + priority: List[str] + name: Optional[str] = None + description: Optional[str] = None + external_link: Optional[str] = None + volume_support: bool + + def __init__( + self, + *, + extension: str, + priority: List[str], + name: str = None, + description: str = None, + external_link: str = None + ) -> None: ... + def reset(self) -> None: ... + +extension_list: List[FileExtension] +known_extensions: Dict[str, List[FileExtension]] +video_extensions: List[FileExtension] diff --git a/parrot/lib/python3.10/site-packages/imageio/config/plugins.py b/parrot/lib/python3.10/site-packages/imageio/config/plugins.py new file mode 100644 index 0000000000000000000000000000000000000000..261dcfb17794fa0695f3e4393dfe9f8ebc72d9bd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/config/plugins.py @@ -0,0 +1,782 @@ +import importlib + +from ..core.legacy_plugin_wrapper import LegacyPlugin + + +class PluginConfig: + """Plugin Configuration Metadata + + This class holds the information needed to lazy-import plugins. + + Parameters + ---------- + name : str + The name of the plugin. + class_name : str + The name of the plugin class inside the plugin module. + module_name : str + The name of the module/package from which to import the plugin. 
+ is_legacy : bool + If True, this plugin is a v2 plugin and will be wrapped in a + LegacyPlugin. Default: False. + package_name : str + If the given module name points to a relative module, then the package + name determines the package it is relative to. + install_name : str + The name of the optional dependency that can be used to install this + plugin if it is missing. + legacy_args : Dict + A dictionary of kwargs to pass to the v2 plugin (Format) upon construction. + + Examples + -------- + >>> PluginConfig( + name="TIFF", + class_name="TiffFormat", + module_name="imageio.plugins.tifffile", + is_legacy=True, + install_name="tifffile", + legacy_args={ + "description": "TIFF format", + "extensions": ".tif .tiff .stk .lsm", + "modes": "iIvV", + }, + ) + >>> PluginConfig( + name="pillow", + class_name="PillowPlugin", + module_name="imageio.plugins.pillow" + ) + + """ + + def __init__( + self, + name, + class_name, + module_name, + *, + is_legacy=False, + package_name=None, + install_name=None, + legacy_args=None, + ): + legacy_args = legacy_args or dict() + + self.name = name + self.class_name = class_name + self.module_name = module_name + self.package_name = package_name + + self.is_legacy = is_legacy + self.install_name = install_name or self.name + self.legacy_args = {"name": name, "description": "A legacy plugin"} + self.legacy_args.update(legacy_args) + + @property + def format(self): + """For backwards compatibility with FormatManager + + Delete when migrating to v3 + """ + if not self.is_legacy: + raise RuntimeError("Can only get format for legacy plugins.") + + module = importlib.import_module(self.module_name, self.package_name) + clazz = getattr(module, self.class_name) + return clazz(**self.legacy_args) + + @property + def plugin_class(self): + """Get the plugin class (import if needed) + + Returns + ------- + plugin_class : Any + The class that can be used to instantiate plugins. 
+ + """ + + module = importlib.import_module(self.module_name, self.package_name) + clazz = getattr(module, self.class_name) + + if self.is_legacy: + legacy_plugin = clazz(**self.legacy_args) + + def partial_legacy_plugin(request): + return LegacyPlugin(request, legacy_plugin) + + clazz = partial_legacy_plugin + + return clazz + + +known_plugins = dict() +known_plugins["pillow"] = PluginConfig( + name="pillow", class_name="PillowPlugin", module_name="imageio.plugins.pillow" +) +known_plugins["pyav"] = PluginConfig( + name="pyav", class_name="PyAVPlugin", module_name="imageio.plugins.pyav" +) +known_plugins["opencv"] = PluginConfig( + name="opencv", class_name="OpenCVPlugin", module_name="imageio.plugins.opencv" +) +known_plugins["tifffile"] = PluginConfig( + name="tifffile", + class_name="TifffilePlugin", + module_name="imageio.plugins.tifffile_v3", +) +known_plugins["SPE"] = PluginConfig( + name="spe", class_name="SpePlugin", module_name="imageio.plugins.spe" +) +known_plugins["rawpy"] = PluginConfig( + name="rawpy", class_name="RawPyPlugin", module_name="imageio.plugins.rawpy" +) + +# Legacy plugins +# ============== +# +# Which are partly registered by format, partly by plugin, and partly by a mix +# of both. We keep the naming here for backwards compatibility. +# In v3 this should become a single entry per plugin named after the plugin +# We can choose extension-specific priority in ``config.extensions``. +# +# Note: Since python 3.7 order of insertion determines the order of dict().keys() +# This means that the order here determines the order by which plugins are +# checked during the full fallback search. We don't advertise this downstream, +# but it could be a useful thing to keep in mind to choose a sensible default +# search order. 
+ +known_plugins["TIFF"] = PluginConfig( + name="TIFF", + class_name="TiffFormat", + module_name="imageio.plugins.tifffile", + is_legacy=True, + install_name="tifffile", + legacy_args={ + "description": "TIFF format", + "extensions": ".tif .tiff .stk .lsm", + "modes": "iIvV", + }, +) + +# PILLOW plugin formats (legacy) +PILLOW_FORMATS = [ + ("BMP", "Windows Bitmap", ".bmp", "PillowFormat"), + ("BUFR", "BUFR", ".bufr", "PillowFormat"), + ("CUR", "Windows Cursor", ".cur", "PillowFormat"), + ("DCX", "Intel DCX", ".dcx", "PillowFormat"), + ("DDS", "DirectDraw Surface", ".dds", "PillowFormat"), + ("DIB", "Windows Bitmap", "", "PillowFormat"), + ("EPS", "Encapsulated Postscript", ".ps .eps", "PillowFormat"), + ("FITS", "FITS", ".fit .fits", "PillowFormat"), + ("FLI", "Autodesk FLI/FLC Animation", ".fli .flc", "PillowFormat"), + ("FPX", "FlashPix", ".fpx", "PillowFormat"), + ("FTEX", "Texture File Format (IW2:EOC)", ".ftc .ftu", "PillowFormat"), + ("GBR", "GIMP brush file", ".gbr", "PillowFormat"), + ("GIF", "Compuserve GIF", ".gif", "GIFFormat"), + ("GRIB", "GRIB", ".grib", "PillowFormat"), + ("HDF5", "HDF5", ".h5 .hdf", "PillowFormat"), + ("ICNS", "Mac OS icns resource", ".icns", "PillowFormat"), + ("ICO", "Windows Icon", ".ico", "PillowFormat"), + ("IM", "IFUNC Image Memory", ".im", "PillowFormat"), + ("IMT", "IM Tools", "", "PillowFormat"), + ("IPTC", "IPTC/NAA", ".iim", "PillowFormat"), + ("JPEG", "JPEG (ISO 10918)", ".jfif .jpe .jpg .jpeg", "JPEGFormat"), + ( + "JPEG2000", + "JPEG 2000 (ISO 15444)", + ".jp2 .j2k .jpc .jpf .jpx .j2c", + "JPEG2000Format", + ), + ("MCIDAS", "McIdas area file", "", "PillowFormat"), + ("MIC", "Microsoft Image Composer", ".mic", "PillowFormat"), + # skipped in legacy pillow + # ("MPEG", "MPEG", ".mpg .mpeg", "PillowFormat"), + ("MPO", "MPO (CIPA DC-007)", ".mpo", "PillowFormat"), + ("MSP", "Windows Paint", ".msp", "PillowFormat"), + ("PCD", "Kodak PhotoCD", ".pcd", "PillowFormat"), + ("PCX", "Paintbrush", ".pcx", "PillowFormat"), + 
("PIXAR", "PIXAR raster image", ".pxr", "PillowFormat"), + ("PNG", "Portable network graphics", ".png", "PNGFormat"), + ("PPM", "Pbmplus image", ".pbm .pgm .ppm", "PillowFormat"), + ("PSD", "Adobe Photoshop", ".psd", "PillowFormat"), + ("SGI", "SGI Image File Format", ".bw .rgb .rgba .sgi", "PillowFormat"), + ("SPIDER", "Spider 2D image", "", "PillowFormat"), + ("SUN", "Sun Raster File", ".ras", "PillowFormat"), + ("TGA", "Targa", ".tga", "PillowFormat"), + ("TIFF", "Adobe TIFF", ".tif .tiff", "TIFFFormat"), + ("WMF", "Windows Metafile", ".wmf .emf", "PillowFormat"), + ("XBM", "X11 Bitmap", ".xbm", "PillowFormat"), + ("XPM", "X11 Pixel Map", ".xpm", "PillowFormat"), + ("XVTHUMB", "XV thumbnail image", "", "PillowFormat"), +] +for id, summary, ext, class_name in PILLOW_FORMATS: + config = PluginConfig( + name=id.upper() + "-PIL", + class_name=class_name, + module_name="imageio.plugins.pillow_legacy", + is_legacy=True, + install_name="pillow", + legacy_args={ + "description": summary + " via Pillow", + "extensions": ext, + "modes": "iI" if class_name == "GIFFormat" else "i", + "plugin_id": id, + }, + ) + known_plugins[config.name] = config + +known_plugins["FFMPEG"] = PluginConfig( + name="FFMPEG", + class_name="FfmpegFormat", + module_name="imageio.plugins.ffmpeg", + is_legacy=True, + install_name="ffmpeg", + legacy_args={ + "description": "Many video formats and cameras (via ffmpeg)", + "extensions": ".mov .avi .mpg .mpeg .mp4 .mkv .webm .wmv .h264", + "modes": "I", + }, +) + +known_plugins["BSDF"] = PluginConfig( + name="BSDF", + class_name="BsdfFormat", + module_name="imageio.plugins.bsdf", + is_legacy=True, + install_name="bsdf", + legacy_args={ + "description": "Format based on the Binary Structured Data Format", + "extensions": ".bsdf", + "modes": "iIvV", + }, +) + +known_plugins["DICOM"] = PluginConfig( + name="DICOM", + class_name="DicomFormat", + module_name="imageio.plugins.dicom", + is_legacy=True, + install_name="dicom", + legacy_args={ + "description": 
"Digital Imaging and Communications in Medicine", + "extensions": ".dcm .ct .mri", + "modes": "iIvV", + }, +) + +known_plugins["FEI"] = PluginConfig( + name="FEI", + class_name="FEISEMFormat", + module_name="imageio.plugins.feisem", + is_legacy=True, + install_name="feisem", + legacy_args={ + "description": "FEI-SEM TIFF format", + "extensions": [".tif", ".tiff"], + "modes": "iv", + }, +) + +known_plugins["FITS"] = PluginConfig( + name="FITS", + class_name="FitsFormat", + module_name="imageio.plugins.fits", + is_legacy=True, + install_name="fits", + legacy_args={ + "description": "Flexible Image Transport System (FITS) format", + "extensions": ".fits .fit .fts .fz", + "modes": "iIvV", + }, +) + +known_plugins["GDAL"] = PluginConfig( + name="GDAL", + class_name="GdalFormat", + module_name="imageio.plugins.gdal", + is_legacy=True, + install_name="gdal", + legacy_args={ + "description": "Geospatial Data Abstraction Library", + "extensions": ".tiff .tif .img .ecw .jpg .jpeg", + "modes": "iIvV", + }, +) + +known_plugins["ITK"] = PluginConfig( + name="ITK", + class_name="ItkFormat", + module_name="imageio.plugins.simpleitk", + is_legacy=True, + install_name="simpleitk", + legacy_args={ + "description": "Insight Segmentation and Registration Toolkit (ITK) format", + "extensions": " ".join( + ( + ".gipl", + ".ipl", + ".mha", + ".mhd", + ".nhdr", + ".nia", + ".hdr", + ".nrrd", + ".nii", + ".nii.gz", + ".img", + ".img.gz", + ".vtk", + ".hdf5", + ".lsm", + ".mnc", + ".mnc2", + ".mgh", + ".mnc", + ".pic", + ".bmp", + ".jpeg", + ".jpg", + ".png", + ".tiff", + ".tif", + ".dicom", + ".dcm", + ".gdcm", + ) + ), + "modes": "iIvV", + }, +) + +known_plugins["NPZ"] = PluginConfig( + name="NPZ", + class_name="NpzFormat", + module_name="imageio.plugins.npz", + is_legacy=True, + install_name="numpy", + legacy_args={ + "description": "Numpy's compressed array format", + "extensions": ".npz", + "modes": "iIvV", + }, +) + +known_plugins["SWF"] = PluginConfig( + name="SWF", + 
class_name="SWFFormat", + module_name="imageio.plugins.swf", + is_legacy=True, + install_name="swf", + legacy_args={ + "description": "Shockwave flash", + "extensions": ".swf", + "modes": "I", + }, +) + +known_plugins["SCREENGRAB"] = PluginConfig( + name="SCREENGRAB", + class_name="ScreenGrabFormat", + module_name="imageio.plugins.grab", + is_legacy=True, + install_name="pillow", + legacy_args={ + "description": "Grab screenshots (Windows and OS X only)", + "extensions": [], + "modes": "i", + }, +) + +known_plugins["CLIPBOARDGRAB"] = PluginConfig( + name="CLIPBOARDGRAB", + class_name="ClipboardGrabFormat", + module_name="imageio.plugins.grab", + is_legacy=True, + install_name="pillow", + legacy_args={ + "description": "Grab from clipboard (Windows only)", + "extensions": [], + "modes": "i", + }, +) + +# LYTRO plugin (legacy) +lytro_formats = [ + ("lytro-lfr", "Lytro Illum lfr image file", ".lfr", "i", "LytroLfrFormat"), + ( + "lytro-illum-raw", + "Lytro Illum raw image file", + ".raw", + "i", + "LytroIllumRawFormat", + ), + ("lytro-lfp", "Lytro F01 lfp image file", ".lfp", "i", "LytroLfpFormat"), + ("lytro-f01-raw", "Lytro F01 raw image file", ".raw", "i", "LytroF01RawFormat"), +] +for name, des, ext, mode, class_name in lytro_formats: + config = PluginConfig( + name=name.upper(), + class_name=class_name, + module_name="imageio.plugins.lytro", + is_legacy=True, + install_name="lytro", + legacy_args={ + "description": des, + "extensions": ext, + "modes": mode, + }, + ) + known_plugins[config.name] = config + +# FreeImage plugin (legacy) +FREEIMAGE_FORMATS = [ + ( + "BMP", + 0, + "Windows or OS/2 Bitmap", + ".bmp", + "i", + "FreeimageBmpFormat", + "imageio.plugins.freeimage", + ), + ( + "CUT", + 21, + "Dr. 
Halo", + ".cut", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "DDS", + 24, + "DirectX Surface", + ".dds", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "EXR", + 29, + "ILM OpenEXR", + ".exr", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "G3", + 27, + "Raw fax format CCITT G.3", + ".g3", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "GIF", + 25, + "Static and animated gif (FreeImage)", + ".gif", + "iI", + "GifFormat", + "imageio.plugins.freeimagemulti", + ), + ( + "HDR", + 26, + "High Dynamic Range Image", + ".hdr", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "ICO", + 1, + "Windows Icon", + ".ico", + "iI", + "IcoFormat", + "imageio.plugins.freeimagemulti", + ), + ( + "IFF", + 5, + "IFF Interleaved Bitmap", + ".iff .lbm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "J2K", + 30, + "JPEG-2000 codestream", + ".j2k .j2c", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "JNG", + 3, + "JPEG Network Graphics", + ".jng", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "JP2", + 31, + "JPEG-2000 File Format", + ".jp2", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "JPEG", + 2, + "JPEG - JFIF Compliant", + ".jpg .jif .jpeg .jpe", + "i", + "FreeimageJpegFormat", + "imageio.plugins.freeimage", + ), + ( + "JPEG-XR", + 36, + "JPEG XR image format", + ".jxr .wdp .hdp", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "KOALA", + 4, + "C64 Koala Graphics", + ".koa", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + # not registered in legacy pillow + # ("MNG", 6, "Multiple-image Network Graphics", ".mng", "i", "FreeimageFormat", "imageio.plugins.freeimage"), + ( + "PBM", + 7, + "Portable Bitmap (ASCII)", + ".pbm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PBMRAW", + 8, + "Portable Bitmap (RAW)", + ".pbm", 
+ "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PCD", + 9, + "Kodak PhotoCD", + ".pcd", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PCX", + 10, + "Zsoft Paintbrush", + ".pcx", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PFM", + 32, + "Portable floatmap", + ".pfm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PGM", + 11, + "Portable Greymap (ASCII)", + ".pgm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PGMRAW", + 12, + "Portable Greymap (RAW)", + ".pgm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PICT", + 33, + "Macintosh PICT", + ".pct .pict .pic", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "PNG", + 13, + "Portable Network Graphics", + ".png", + "i", + "FreeimagePngFormat", + "imageio.plugins.freeimage", + ), + ( + "PPM", + 14, + "Portable Pixelmap (ASCII)", + ".ppm", + "i", + "FreeimagePnmFormat", + "imageio.plugins.freeimage", + ), + ( + "PPMRAW", + 15, + "Portable Pixelmap (RAW)", + ".ppm", + "i", + "FreeimagePnmFormat", + "imageio.plugins.freeimage", + ), + ( + "PSD", + 20, + "Adobe Photoshop", + ".psd", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "RAS", + 16, + "Sun Raster Image", + ".ras", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "RAW", + 34, + "RAW camera image", + ".3fr .arw .bay .bmq .cap .cine .cr2 .crw .cs1 .dc2 " + ".dcr .drf .dsc .dng .erf .fff .ia .iiq .k25 .kc2 .kdc .mdc .mef .mos .mrw .nef .nrw .orf " + ".pef .ptx .pxn .qtk .raf .raw .rdc .rw2 .rwl .rwz .sr2 .srf .srw .sti", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "SGI", + 28, + "SGI Image Format", + ".sgi .rgb .rgba .bw", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "TARGA", + 17, + "Truevision Targa", + ".tga .targa", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "TIFF", + 18, + 
"Tagged Image File Format", + ".tif .tiff", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "WBMP", + 19, + "Wireless Bitmap", + ".wap .wbmp .wbm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "WebP", + 35, + "Google WebP image format", + ".webp", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "XBM", + 22, + "X11 Bitmap Format", + ".xbm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), + ( + "XPM", + 23, + "X11 Pixmap Format", + ".xpm", + "i", + "FreeimageFormat", + "imageio.plugins.freeimage", + ), +] +for name, i, des, ext, mode, class_name, module_name in FREEIMAGE_FORMATS: + config = PluginConfig( + name=name.upper() + "-FI", + class_name=class_name, + module_name=module_name, + is_legacy=True, + install_name="freeimage", + legacy_args={ + "description": des, + "extensions": ext, + "modes": mode, + "fif": i, + }, + ) + known_plugins[config.name] = config + +# exists for backwards compatibility with FormatManager +# delete in V3 +_original_order = [x for x, config in known_plugins.items() if config.is_legacy] diff --git a/parrot/lib/python3.10/site-packages/imageio/config/plugins.pyi b/parrot/lib/python3.10/site-packages/imageio/config/plugins.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ab5d4a816257e0f501fce1c087e74a3d1f66dc13 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/config/plugins.pyi @@ -0,0 +1,28 @@ +from typing import Any, Dict, Optional +from ..core.v3_plugin_api import PluginV3 + +class PluginConfig: + name: str + class_name: str + module_name: str + is_legacy: bool + package_name: Optional[str] = None + install_name: Optional[str] = None + legacy_args: Optional[dict] = None + @property + def format(self) -> Any: ... + @property + def plugin_class(self) -> PluginV3: ... 
+ def __init__( + self, + name: str, + class_name: str, + module_name: str, + *, + is_legacy: bool = False, + package_name: str = None, + install_name: str = None, + legacy_args: dict = None, + ) -> None: ... + +known_plugins: Dict[str, PluginConfig] diff --git a/parrot/lib/python3.10/site-packages/imageio/freeze.py b/parrot/lib/python3.10/site-packages/imageio/freeze.py new file mode 100644 index 0000000000000000000000000000000000000000..3753a29df665e416030b4eb0453ed3430a4c78fc --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/freeze.py @@ -0,0 +1,11 @@ +""" +Helper functions for freezing imageio. +""" + + +def get_includes(): + return ["email", "urllib.request", "numpy", "zipfile", "io"] + + +def get_excludes(): + return [] diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/__init__.py b/parrot/lib/python3.10/site-packages/imageio/plugins/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..741415e955069d300bee8d8bc529ea0df742d700 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/__init__.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +# flake8: noqa + +""" +Here you can find documentation on how to write your own plugin to allow +ImageIO to access a new backend. Plugins are quite object oriented, and +the relevant classes and their interaction are documented here: + +.. currentmodule:: imageio + +.. autosummary:: + :toctree: ../_autosummary + :template: better_class.rst + + imageio.core.Format + imageio.core.Request + +.. note:: + You can always check existing plugins if you want to see examples. + +What methods to implement +------------------------- + +To implement a new plugin, create a new class that inherits from +:class:`imageio.core.Format`. and implement the following functions: + +.. 
autosummary:: + :toctree: ../_autosummary + + imageio.core.Format.__init__ + imageio.core.Format._can_read + imageio.core.Format._can_write + +Further, each format contains up to two nested classes; one for reading and +one for writing. To support reading and/or writing, the respective classes +need to be defined. + +For reading, create a nested class that inherits from +``imageio.core.Format.Reader`` and that implements the following functions: + + * Implement ``_open(**kwargs)`` to initialize the reader. Deal with the + user-provided keyword arguments here. + * Implement ``_close()`` to clean up. + * Implement ``_get_length()`` to provide a suitable length based on what + the user expects. Can be ``inf`` for streaming data. + * Implement ``_get_data(index)`` to return an array and a meta-data dict. + * Implement ``_get_meta_data(index)`` to return a meta-data dict. If index + is None, it should return the 'global' meta-data. + +For writing, create a nested class that inherits from +``imageio.core.Format.Writer`` and implement the following functions: + + * Implement ``_open(**kwargs)`` to initialize the writer. Deal with the + user-provided keyword arguments here. + * Implement ``_close()`` to clean up. + * Implement ``_append_data(im, meta)`` to add data (and meta-data). + * Implement ``_set_meta_data(meta)`` to set the global meta-data. + +""" + +import importlib +import os +import warnings + + +# v2 imports remove in v3 +from .. import formats + +# v2 allows formatting plugins by environment variable +# this is done here. +env_plugin_order = os.getenv("IMAGEIO_FORMAT_ORDER", None) +if env_plugin_order is not None: # pragma: no cover + warnings.warn( + "Setting plugin priority through an environment variable is" + " deprecated and will be removed in ImageIO v3. There is no" + " replacement planned for this feature. 
If you have an" + " active use-case for it, please reach out to us on GitHub.", + DeprecationWarning, + ) + + formats.sort(*os.getenv("IMAGEIO_FORMAT_ORDER", "").split(",")) + + +# this class replaces plugin module. For details +# see https://stackoverflow.com/questions/2447353/getattr-on-a-module +def __getattr__(name): + """Lazy-Import Plugins + + This function dynamically loads plugins into the imageio.plugin + namespace upon first access. For example, the following snippet will + delay importing freeimage until the second line: + + >>> import imageio + >>> imageio.plugins.freeimage.download() + + """ + + try: + return importlib.import_module(f"imageio.plugins.{name}") + except ImportError: + raise AttributeError(f"module '{__name__}' has no attribute '{name}'") from None diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/_bsdf.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/_bsdf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbe0aa22575159abe0348ed2b96b40ab0653a44e Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/_bsdf.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/_freeimage.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/_freeimage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e4e76187e7cf384cb12e231d28a6b2f20dcc115 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/_freeimage.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/example.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/example.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0983676a5a7047a84003326a0d9f58a18eee1170 Binary files /dev/null and 
b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/example.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/freeimage.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/freeimage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd3cd1c012afc7a7226ce65558864eb679293ba3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/freeimage.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/freeimagemulti.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/freeimagemulti.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0020a17607327af8988a1a6e14bf0ff9689d96e8 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/freeimagemulti.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/gdal.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/gdal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9791315c18bae4cf9a900df76dabc031115d265c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/gdal.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/grab.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/grab.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e93060ae9926c0b1a39626705547e0d878896f67 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/grab.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/lytro.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/lytro.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..476f488b1b2454e11588f3f11f881ec70644da7b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/lytro.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/opencv.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/opencv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f80bef3ae8974a10b2294f8eec094ae13a70a2b8 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/opencv.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f78a2cc396016f38ffebeda36b3c40d60e748fc9 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow_legacy.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow_legacy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..002abbc2833ab105aa18043bc54adc3f82d9d6c3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillow_legacy.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillowmulti.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillowmulti.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49f506d1653401ddc03b9107266d7f018079ac34 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/pillowmulti.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/rawpy.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/rawpy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c4c1dce32062e439c43805653936fb4a3e4ea94 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/rawpy.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/spe.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/spe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b842496425113ab6f79dd17c95a1d671745a448 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/spe.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/swf.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/swf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e46f65eda85162628c893b4aaadb9b4bb220ea2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/swf.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/tifffile.cpython-310.pyc b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/tifffile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9414e7130eb5114fa98a0fdcccc98e800b60d730 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/imageio/plugins/__pycache__/tifffile.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/_bsdf.py b/parrot/lib/python3.10/site-packages/imageio/plugins/_bsdf.py new file mode 100644 index 0000000000000000000000000000000000000000..d6f56ce0dc471b0c13bc4e10f0ce9a26b7303bce --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/_bsdf.py @@ -0,0 
+1,915 @@ +#!/usr/bin/env python +# This file is distributed under the terms of the 2-clause BSD License. +# Copyright (c) 2017-2018, Almar Klein + +""" +Python implementation of the Binary Structured Data Format (BSDF). + +BSDF is a binary format for serializing structured (scientific) data. +See http://bsdf.io for more information. + +This is the reference implementation, which is relatively relatively +sophisticated, providing e.g. lazy loading of blobs and streamed +reading/writing. A simpler Python implementation is available as +``bsdf_lite.py``. + +This module has no dependencies and works on Python 2.7 and 3.4+. + +Note: on Legacy Python (Python 2.7), non-Unicode strings are encoded as bytes. +""" + +# todo: in 2020, remove six stuff, __future__ and _isidentifier +# todo: in 2020, remove 'utf-8' args to encode/decode; it's faster + +from __future__ import absolute_import, division, print_function + +import bz2 +import hashlib +import logging +import os +import re +import struct +import sys +import types +import zlib +from io import BytesIO + +logger = logging.getLogger(__name__) + +# Notes on versioning: the major and minor numbers correspond to the +# BSDF format version. The major number if increased when backward +# incompatible changes are introduced. An implementation must raise an +# exception when the file being read has a higher major version. The +# minor number is increased when new backward compatible features are +# introduced. An implementation must display a warning when the file +# being read has a higher minor version. The patch version is increased +# for subsequent releases of the implementation. 
+VERSION = 2, 1, 2 +__version__ = ".".join(str(i) for i in VERSION) + + +# %% The encoder and decoder implementation + +# From six.py +PY3 = sys.version_info[0] >= 3 +if PY3: + text_type = str + string_types = str + unicode_types = str + integer_types = int + classtypes = type +else: # pragma: no cover + logging.basicConfig() # avoid "no handlers found" error + text_type = unicode # noqa + string_types = basestring # noqa + unicode_types = unicode # noqa + integer_types = (int, long) # noqa + classtypes = type, types.ClassType + +# Shorthands +spack = struct.pack +strunpack = struct.unpack + + +def lencode(x): + """Encode an unsigned integer into a variable sized blob of bytes.""" + # We could support 16 bit and 32 bit as well, but the gain is low, since + # 9 bytes for collections with over 250 elements is marginal anyway. + if x <= 250: + return spack(" extension + self._extensions_by_cls = {} # cls -> (name, extension.encode) + if extensions is None: + extensions = standard_extensions + for extension in extensions: + self.add_extension(extension) + self._parse_options(**options) + + def _parse_options( + self, + compression=0, + use_checksum=False, + float64=True, + load_streaming=False, + lazy_blob=False, + ): + # Validate compression + if isinstance(compression, string_types): + m = {"no": 0, "zlib": 1, "bz2": 2} + compression = m.get(compression.lower(), compression) + if compression not in (0, 1, 2): + raise TypeError("Compression must be 0, 1, 2, " '"no", "zlib", or "bz2"') + self._compression = compression + + # Other encoding args + self._use_checksum = bool(use_checksum) + self._float64 = bool(float64) + + # Decoding args + self._load_streaming = bool(load_streaming) + self._lazy_blob = bool(lazy_blob) + + def add_extension(self, extension_class): + """Add an extension to this serializer instance, which must be + a subclass of Extension. Can be used as a decorator. 
+ """ + # Check class + if not ( + isinstance(extension_class, type) and issubclass(extension_class, Extension) + ): + raise TypeError("add_extension() expects a Extension class.") + extension = extension_class() + + # Get name + name = extension.name + if not isinstance(name, str): + raise TypeError("Extension name must be str.") + if len(name) == 0 or len(name) > 250: + raise NameError( + "Extension names must be nonempty and shorter " "than 251 chars." + ) + if name in self._extensions: + logger.warning( + 'BSDF warning: overwriting extension "%s", ' + "consider removing first" % name + ) + + # Get classes + cls = extension.cls + if not cls: + clss = [] + elif isinstance(cls, (tuple, list)): + clss = cls + else: + clss = [cls] + for cls in clss: + if not isinstance(cls, classtypes): + raise TypeError("Extension classes must be types.") + + # Store + for cls in clss: + self._extensions_by_cls[cls] = name, extension.encode + self._extensions[name] = extension + return extension_class + + def remove_extension(self, name): + """Remove a converted by its unique name.""" + if not isinstance(name, str): + raise TypeError("Extension name must be str.") + if name in self._extensions: + self._extensions.pop(name) + for cls in list(self._extensions_by_cls.keys()): + if self._extensions_by_cls[cls][0] == name: + self._extensions_by_cls.pop(cls) + + def _encode(self, f, value, streams, ext_id): + """Main encoder function.""" + x = encode_type_id + + if value is None: + f.write(x(b"v", ext_id)) # V for void + elif value is True: + f.write(x(b"y", ext_id)) # Y for yes + elif value is False: + f.write(x(b"n", ext_id)) # N for no + elif isinstance(value, integer_types): + if -32768 <= value <= 32767: + f.write(x(b"h", ext_id) + spack("h", value)) # H for ... 
+ else: + f.write(x(b"i", ext_id) + spack(" 0: + raise ValueError("Can only have one stream per file.") + streams.append(value) + value._activate(f, self._encode, self._decode) # noqa + else: + if ext_id is not None: + raise ValueError( + "Extension %s wronfully encodes object to another " + "extension object (though it may encode to a list/dict " + "that contains other extension objects)." % ext_id + ) + # Try if the value is of a type we know + ex = self._extensions_by_cls.get(value.__class__, None) + # Maybe its a subclass of a type we know + if ex is None: + for name, c in self._extensions.items(): + if c.match(self, value): + ex = name, c.encode + break + else: + ex = None + # Success or fail + if ex is not None: + ext_id2, extension_encode = ex + self._encode(f, extension_encode(self, value), streams, ext_id2) + else: + t = ( + "Class %r is not a valid base BSDF type, nor is it " + "handled by an extension." + ) + raise TypeError(t % value.__class__.__name__) + + def _decode(self, f): + """Main decoder function.""" + + # Get value + char = f.read(1) + c = char.lower() + + # Conversion (uppercase value identifiers signify converted values) + if not char: + raise EOFError() + elif char != c: + n = strunpack("= 254: + # Streaming + closed = n == 254 + n = strunpack(" 0 + name = f.read(n_name).decode("UTF-8") + value[name] = self._decode(f) + elif c == b"b": + if self._lazy_blob: + value = Blob((f, True)) + else: + blob = Blob((f, False)) + value = blob.get_bytes() + else: + raise RuntimeError("Parse error %r" % char) + + # Convert value if we have an extension for it + if ext_id is not None: + extension = self._extensions.get(ext_id, None) + if extension is not None: + value = extension.decode(self, value) + else: + logger.warning("BSDF warning: no extension found for %r" % ext_id) + + return value + + def encode(self, ob): + """Save the given object to bytes.""" + f = BytesIO() + self.save(f, ob) + return f.getvalue() + + def save(self, f, ob): + """Write the 
given object to the given file object.""" + f.write(b"BSDF") + f.write(struct.pack(" 0: + stream = streams[0] + if stream._start_pos != f.tell(): + raise ValueError( + "The stream object must be " "the last object to be encoded." + ) + + def decode(self, bb): + """Load the data structure that is BSDF-encoded in the given bytes.""" + f = BytesIO(bb) + return self.load(f) + + def load(self, f): + """Load a BSDF-encoded object from the given file object.""" + # Check magic string + f4 = f.read(4) + if f4 != b"BSDF": + raise RuntimeError("This does not look like a BSDF file: %r" % f4) + # Check version + major_version = strunpack(" VERSION[1]: # minor should be < ours + t = ( + "BSDF warning: reading file with higher minor version (%s) " + "than the implementation (%s)." + ) + logger.warning(t % (__version__, file_version)) + + return self._decode(f) + + +# %% Streaming and blob-files + + +class BaseStream(object): + """Base class for streams.""" + + def __init__(self, mode="w"): + self._i = 0 + self._count = -1 + if isinstance(mode, int): + self._count = mode + mode = "r" + elif mode == "w": + self._count = 0 + assert mode in ("r", "w") + self._mode = mode + self._f = None + self._start_pos = 0 + + def _activate(self, file, encode_func, decode_func): + if self._f is not None: # Associated with another write + raise IOError("Stream object cannot be activated twice?") + self._f = file + self._start_pos = self._f.tell() + self._encode = encode_func + self._decode = decode_func + + @property + def mode(self): + """The mode of this stream: 'r' or 'w'.""" + return self._mode + + +class ListStream(BaseStream): + """A streamable list object used for writing or reading. + In read mode, it can also be iterated over. + """ + + @property + def count(self): + """The number of elements in the stream (can be -1 for unclosed + streams in read-mode). 
+ """ + return self._count + + @property + def index(self): + """The current index of the element to read/write.""" + return self._i + + def append(self, item): + """Append an item to the streaming list. The object is immediately + serialized and written to the underlying file. + """ + # if self._mode != 'w': + # raise IOError('This ListStream is not in write mode.') + if self._count != self._i: + raise IOError("Can only append items to the end of the stream.") + if self._f is None: + raise IOError("List stream is not associated with a file yet.") + if self._f.closed: + raise IOError("Cannot stream to a close file.") + self._encode(self._f, item, [self], None) + self._i += 1 + self._count += 1 + + def close(self, unstream=False): + """Close the stream, marking the number of written elements. New + elements may still be appended, but they won't be read during decoding. + If ``unstream`` is False, the stream is turned into a regular list + (not streaming). + """ + # if self._mode != 'w': + # raise IOError('This ListStream is not in write mode.') + if self._count != self._i: + raise IOError("Can only close when at the end of the stream.") + if self._f is None: + raise IOError("ListStream is not associated with a file yet.") + if self._f.closed: + raise IOError("Cannot close a stream on a close file.") + i = self._f.tell() + self._f.seek(self._start_pos - 8 - 1) + self._f.write(spack("= 0: + if self._i >= self._count: + raise StopIteration() + self._i += 1 + return self._decode(self._f) + else: + # This raises EOFError at some point. + try: + res = self._decode(self._f) + self._i += 1 + return res + except EOFError: + self._count = self._i + raise StopIteration() + + def __iter__(self): + if self._mode != "r": + raise IOError("Cannot iterate: ListStream in not in read mode.") + return self + + def __next__(self): + return self.next() + + +class Blob(object): + """Object to represent a blob of bytes. 
When used to write a BSDF file, + it's a wrapper for bytes plus properties such as what compression to apply. + When used to read a BSDF file, it can be used to read the data lazily, and + also modify the data if reading in 'r+' mode and the blob isn't compressed. + """ + + # For now, this does not allow re-sizing blobs (within the allocated size) + # but this can be added later. + + def __init__(self, bb, compression=0, extra_size=0, use_checksum=False): + if isinstance(bb, bytes): + self._f = None + self.compressed = self._from_bytes(bb, compression) + self.compression = compression + self.allocated_size = self.used_size + extra_size + self.use_checksum = use_checksum + elif isinstance(bb, tuple) and len(bb) == 2 and hasattr(bb[0], "read"): + self._f, allow_seek = bb + self.compressed = None + self._from_file(self._f, allow_seek) + self._modified = False + else: + raise TypeError("Wrong argument to create Blob.") + + def _from_bytes(self, value, compression): + """When used to wrap bytes in a blob.""" + if compression == 0: + compressed = value + elif compression == 1: + compressed = zlib.compress(value, 9) + elif compression == 2: + compressed = bz2.compress(value, 9) + else: # pragma: no cover + assert False, "Unknown compression identifier" + + self.data_size = len(value) + self.used_size = len(compressed) + return compressed + + def _to_file(self, f): + """Private friend method called by encoder to write a blob to a file.""" + # Write sizes - write at least in a size that allows resizing + if self.allocated_size <= 250 and self.compression == 0: + f.write(spack(" self.allocated_size: + raise IOError("Seek beyond blob boundaries.") + self._f.seek(self.start_pos + p) + + def tell(self): + """Get the current file pointer position (relative to the blob start).""" + if self._f is None: + raise RuntimeError( + "Cannot tell in a blob " "that is not created by the BSDF decoder." 
+ ) + return self._f.tell() - self.start_pos + + def write(self, bb): + """Write bytes to the blob.""" + if self._f is None: + raise RuntimeError( + "Cannot write in a blob " "that is not created by the BSDF decoder." + ) + if self.compression: + raise IOError("Cannot arbitrarily write in compressed blob.") + if self._f.tell() + len(bb) > self.end_pos: + raise IOError("Write beyond blob boundaries.") + self._modified = True + return self._f.write(bb) + + def read(self, n): + """Read n bytes from the blob.""" + if self._f is None: + raise RuntimeError( + "Cannot read in a blob " "that is not created by the BSDF decoder." + ) + if self.compression: + raise IOError("Cannot arbitrarily read in compressed blob.") + if self._f.tell() + n > self.end_pos: + raise IOError("Read beyond blob boundaries.") + return self._f.read(n) + + def get_bytes(self): + """Get the contents of the blob as bytes.""" + if self.compressed is not None: + compressed = self.compressed + else: + i = self._f.tell() + self.seek(0) + compressed = self._f.read(self.used_size) + self._f.seek(i) + if self.compression == 0: + value = compressed + elif self.compression == 1: + value = zlib.decompress(compressed) + elif self.compression == 2: + value = bz2.decompress(compressed) + else: # pragma: no cover + raise RuntimeError("Invalid compression %i" % self.compression) + return value + + def update_checksum(self): + """Reset the blob's checksum if present. Call this after modifying + the data. + """ + # or ... should the presence of a checksum mean that data is proteced? + if self.use_checksum and self._modified: + self.seek(0) + compressed = self._f.read(self.used_size) + self._f.seek(self.start_pos - self.alignment - 1 - 16) + self._f.write(hashlib.md5(compressed).digest()) + + +# %% High-level functions + + +def encode(ob, extensions=None, **options): + """Save (BSDF-encode) the given object to bytes. + See `BSDFSerializer` for details on extensions and options. 
+ """ + s = BsdfSerializer(extensions, **options) + return s.encode(ob) + + +def save(f, ob, extensions=None, **options): + """Save (BSDF-encode) the given object to the given filename or + file object. See` BSDFSerializer` for details on extensions and options. + """ + s = BsdfSerializer(extensions, **options) + if isinstance(f, string_types): + with open(f, "wb") as fp: + return s.save(fp, ob) + else: + return s.save(f, ob) + + +def decode(bb, extensions=None, **options): + """Load a (BSDF-encoded) structure from bytes. + See `BSDFSerializer` for details on extensions and options. + """ + s = BsdfSerializer(extensions, **options) + return s.decode(bb) + + +def load(f, extensions=None, **options): + """Load a (BSDF-encoded) structure from the given filename or file object. + See `BSDFSerializer` for details on extensions and options. + """ + s = BsdfSerializer(extensions, **options) + if isinstance(f, string_types): + if f.startswith(("~/", "~\\")): # pragma: no cover + f = os.path.expanduser(f) + with open(f, "rb") as fp: + return s.load(fp) + else: + return s.load(f) + + +# Aliases for json compat +loads = decode +dumps = encode + + +# %% Standard extensions + +# Defining extensions as a dict would be more compact and feel lighter, but +# that would only allow lambdas, which is too limiting, e.g. for ndarray +# extension. + + +class Extension(object): + """Base class to implement BSDF extensions for special data types. + + Extension classes are provided to the BSDF serializer, which + instantiates the class. That way, the extension can be somewhat dynamic: + e.g. the NDArrayExtension exposes the ndarray class only when numpy + is imported. + + A extension instance must have two attributes. These can be attributes of + the class, or of the instance set in ``__init__()``: + + * name (str): the name by which encoded values will be identified. + * cls (type): the type (or list of types) to match values with. 
+ This is optional, but it makes the encoder select extensions faster. + + Further, it needs 3 methods: + + * `match(serializer, value) -> bool`: return whether the extension can + convert the given value. The default is ``isinstance(value, self.cls)``. + * `encode(serializer, value) -> encoded_value`: the function to encode a + value to more basic data types. + * `decode(serializer, encoded_value) -> value`: the function to decode an + encoded value back to its intended representation. + + """ + + name = "" + cls = () + + def __repr__(self): + return "" % (self.name, hex(id(self))) + + def match(self, s, v): + return isinstance(v, self.cls) + + def encode(self, s, v): + raise NotImplementedError() + + def decode(self, s, v): + raise NotImplementedError() + + +class ComplexExtension(Extension): + name = "c" + cls = complex + + def encode(self, s, v): + return (v.real, v.imag) + + def decode(self, s, v): + return complex(v[0], v[1]) + + +class NDArrayExtension(Extension): + name = "ndarray" + + def __init__(self): + if "numpy" in sys.modules: + import numpy as np + + self.cls = np.ndarray + + def match(self, s, v): # pragma: no cover - e.g. 
work for nd arrays in JS + return hasattr(v, "shape") and hasattr(v, "dtype") and hasattr(v, "tobytes") + + def encode(self, s, v): + return dict(shape=v.shape, dtype=text_type(v.dtype), data=v.tobytes()) + + def decode(self, s, v): + try: + import numpy as np + except ImportError: # pragma: no cover + return v + a = np.frombuffer(v["data"], dtype=v["dtype"]) + a.shape = v["shape"] + return a + + +standard_extensions = [ComplexExtension, NDArrayExtension] + + +if __name__ == "__main__": + # Invoke CLI + import bsdf_cli + + bsdf_cli.main() diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/_dicom.py b/parrot/lib/python3.10/site-packages/imageio/plugins/_dicom.py new file mode 100644 index 0000000000000000000000000000000000000000..2f2f7ac511e2ef75b25c203b075724dcc152f564 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/_dicom.py @@ -0,0 +1,932 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Plugin for reading DICOM files. +""" + +# todo: Use pydicom: +# * Note: is not py3k ready yet +# * Allow reading the full meta info +# I think we can more or less replace the SimpleDicomReader with a +# pydicom.Dataset For series, only ned to read the full info from one +# file: speed still high +# * Perhaps allow writing? 
+ +import sys +import os +import struct +import logging + +import numpy as np + + +logger = logging.getLogger(__name__) + +# Determine endianity of system +sys_is_little_endian = sys.byteorder == "little" + +# Define a dictionary that contains the tags that we would like to know +MINIDICT = { + (0x7FE0, 0x0010): ("PixelData", "OB"), + # Date and time + (0x0008, 0x0020): ("StudyDate", "DA"), + (0x0008, 0x0021): ("SeriesDate", "DA"), + (0x0008, 0x0022): ("AcquisitionDate", "DA"), + (0x0008, 0x0023): ("ContentDate", "DA"), + (0x0008, 0x0030): ("StudyTime", "TM"), + (0x0008, 0x0031): ("SeriesTime", "TM"), + (0x0008, 0x0032): ("AcquisitionTime", "TM"), + (0x0008, 0x0033): ("ContentTime", "TM"), + # With what, where, by whom? + (0x0008, 0x0060): ("Modality", "CS"), + (0x0008, 0x0070): ("Manufacturer", "LO"), + (0x0008, 0x0080): ("InstitutionName", "LO"), + # Descriptions + (0x0008, 0x1030): ("StudyDescription", "LO"), + (0x0008, 0x103E): ("SeriesDescription", "LO"), + # UID's + (0x0008, 0x0016): ("SOPClassUID", "UI"), + (0x0008, 0x0018): ("SOPInstanceUID", "UI"), + (0x0020, 0x000D): ("StudyInstanceUID", "UI"), + (0x0020, 0x000E): ("SeriesInstanceUID", "UI"), + (0x0008, 0x0117): ("ContextUID", "UI"), + # Numbers + (0x0020, 0x0011): ("SeriesNumber", "IS"), + (0x0020, 0x0012): ("AcquisitionNumber", "IS"), + (0x0020, 0x0013): ("InstanceNumber", "IS"), + (0x0020, 0x0014): ("IsotopeNumber", "IS"), + (0x0020, 0x0015): ("PhaseNumber", "IS"), + (0x0020, 0x0016): ("IntervalNumber", "IS"), + (0x0020, 0x0017): ("TimeSlotNumber", "IS"), + (0x0020, 0x0018): ("AngleNumber", "IS"), + (0x0020, 0x0019): ("ItemNumber", "IS"), + (0x0020, 0x0020): ("PatientOrientation", "CS"), + (0x0020, 0x0030): ("ImagePosition", "CS"), + (0x0020, 0x0032): ("ImagePositionPatient", "CS"), + (0x0020, 0x0035): ("ImageOrientation", "CS"), + (0x0020, 0x0037): ("ImageOrientationPatient", "CS"), + # Patient information + (0x0010, 0x0010): ("PatientName", "PN"), + (0x0010, 0x0020): ("PatientID", "LO"), + (0x0010, 
0x0030): ("PatientBirthDate", "DA"), + (0x0010, 0x0040): ("PatientSex", "CS"), + (0x0010, 0x1010): ("PatientAge", "AS"), + (0x0010, 0x1020): ("PatientSize", "DS"), + (0x0010, 0x1030): ("PatientWeight", "DS"), + # Image specific (required to construct numpy array) + (0x0028, 0x0002): ("SamplesPerPixel", "US"), + (0x0028, 0x0008): ("NumberOfFrames", "IS"), + (0x0028, 0x0100): ("BitsAllocated", "US"), + (0x0028, 0x0101): ("BitsStored", "US"), + (0x0028, 0x0102): ("HighBit", "US"), + (0x0028, 0x0103): ("PixelRepresentation", "US"), + (0x0028, 0x0010): ("Rows", "US"), + (0x0028, 0x0011): ("Columns", "US"), + (0x0028, 0x1052): ("RescaleIntercept", "DS"), + (0x0028, 0x1053): ("RescaleSlope", "DS"), + # Image specific (for the user) + (0x0028, 0x0030): ("PixelSpacing", "DS"), + (0x0018, 0x0088): ("SliceSpacing", "DS"), +} + +# Define some special tags: +# See PS 3.5-2008 section 7.5 (p.40) +ItemTag = (0xFFFE, 0xE000) # start of Sequence Item +ItemDelimiterTag = (0xFFFE, 0xE00D) # end of Sequence Item +SequenceDelimiterTag = (0xFFFE, 0xE0DD) # end of Sequence of undefined length + +# Define set of groups that we're interested in (so we can quickly skip others) +GROUPS = set([key[0] for key in MINIDICT.keys()]) +VRS = set([val[1] for val in MINIDICT.values()]) + + +class NotADicomFile(Exception): + pass + + +class CompressedDicom(RuntimeError): + pass + + +class SimpleDicomReader(object): + """ + This class provides reading of pixel data from DICOM files. It is + focussed on getting the pixel data, not the meta info. + + To use, first create an instance of this class (giving it + a file object or filename). Next use the info attribute to + get a dict of the meta data. The loading of pixel data is + deferred until get_numpy_array() is called. + + Comparison with Pydicom + ----------------------- + + This code focusses on getting the pixel data out, which allows some + shortcuts, resulting in the code being much smaller. 
+ + Since the processing of data elements is much cheaper (it skips a lot + of tags), this code is about 3x faster than pydicom (except for the + deflated DICOM files). + + This class does borrow some code (and ideas) from the pydicom + project, and (to the best of our knowledge) has the same limitations + as pydicom with regard to the type of files that it can handle. + + Limitations + ----------- + + For more advanced DICOM processing, please check out pydicom. + + * Only a predefined subset of data elements (meta information) is read. + * This is a reader; it can not write DICOM files. + * (just like pydicom) it can handle none of the compressed DICOM + formats except for "Deflated Explicit VR Little Endian" + (1.2.840.10008.1.2.1.99). + + """ + + def __init__(self, file): + # Open file if filename given + if isinstance(file, str): + self._filename = file + self._file = open(file, "rb") + else: + self._filename = "" + self._file = file + # Init variable to store position and size of pixel data + self._pixel_data_loc = None + # The meta header is always explicit and little endian + self.is_implicit_VR = False + self.is_little_endian = True + self._unpackPrefix = "<" + # Dict to store data elements of interest in + self._info = {} + # VR Conversion + self._converters = { + # Numbers + "US": lambda x: self._unpack("H", x), + "UL": lambda x: self._unpack("L", x), + # Numbers encoded as strings + "DS": lambda x: self._splitValues(x, float, "\\"), + "IS": lambda x: self._splitValues(x, int, "\\"), + # strings + "AS": lambda x: x.decode("ascii", "ignore").strip("\x00"), + "DA": lambda x: x.decode("ascii", "ignore").strip("\x00"), + "TM": lambda x: x.decode("ascii", "ignore").strip("\x00"), + "UI": lambda x: x.decode("ascii", "ignore").strip("\x00"), + "LO": lambda x: x.decode("utf-8", "ignore").strip("\x00").rstrip(), + "CS": lambda x: self._splitValues(x, float, "\\"), + "PN": lambda x: x.decode("utf-8", "ignore").strip("\x00").rstrip(), + } + + # Initiate reading + 
self._read() + + @property + def info(self): + return self._info + + def _splitValues(self, x, type, splitter): + s = x.decode("ascii").strip("\x00") + try: + if splitter in s: + return tuple([type(v) for v in s.split(splitter) if v.strip()]) + else: + return type(s) + except ValueError: + return s + + def _unpack(self, fmt, value): + return struct.unpack(self._unpackPrefix + fmt, value)[0] + + # Really only so we need minimal changes to _pixel_data_numpy + def __iter__(self): + return iter(self._info.keys()) + + def __getattr__(self, key): + info = object.__getattribute__(self, "_info") + if key in info: + return info[key] + return object.__getattribute__(self, key) # pragma: no cover + + def _read(self): + f = self._file + # Check prefix after peamble + f.seek(128) + if f.read(4) != b"DICM": + raise NotADicomFile("Not a valid DICOM file.") + # Read + self._read_header() + self._read_data_elements() + self._get_shape_and_sampling() + # Close if done, reopen if necessary to read pixel data + if os.path.isfile(self._filename): + self._file.close() + self._file = None + + def _readDataElement(self): + f = self._file + # Get group and element + group = self._unpack("H", f.read(2)) + element = self._unpack("H", f.read(2)) + # Get value length + if self.is_implicit_VR: + vl = self._unpack("I", f.read(4)) + else: + vr = f.read(2) + if vr in (b"OB", b"OW", b"SQ", b"UN"): + reserved = f.read(2) # noqa + vl = self._unpack("I", f.read(4)) + else: + vl = self._unpack("H", f.read(2)) + # Get value + if group == 0x7FE0 and element == 0x0010: + here = f.tell() + self._pixel_data_loc = here, vl + f.seek(here + vl) + return group, element, b"Deferred loading of pixel data" + else: + if vl == 0xFFFFFFFF: + value = self._read_undefined_length_value() + else: + value = f.read(vl) + return group, element, value + + def _read_undefined_length_value(self, read_size=128): + """Copied (in compacted form) from PyDicom + Copyright Darcy Mason. 
+ """ + fp = self._file + # data_start = fp.tell() + search_rewind = 3 + bytes_to_find = struct.pack( + self._unpackPrefix + "HH", SequenceDelimiterTag[0], SequenceDelimiterTag[1] + ) + + found = False + value_chunks = [] + while not found: + chunk_start = fp.tell() + bytes_read = fp.read(read_size) + if len(bytes_read) < read_size: + # try again, + # if still don't get required amount, this is last block + new_bytes = fp.read(read_size - len(bytes_read)) + bytes_read += new_bytes + if len(bytes_read) < read_size: + raise EOFError( + "End of file reached before sequence " "delimiter found." + ) + index = bytes_read.find(bytes_to_find) + if index != -1: + found = True + value_chunks.append(bytes_read[:index]) + fp.seek(chunk_start + index + 4) # rewind to end of delimiter + length = fp.read(4) + if length != b"\0\0\0\0": + logger.warning( + "Expected 4 zero bytes after undefined length " "delimiter" + ) + else: + fp.seek(fp.tell() - search_rewind) # rewind a bit + # accumulate the bytes read (not including the rewind) + value_chunks.append(bytes_read[:-search_rewind]) + + # if get here then have found the byte string + return b"".join(value_chunks) + + def _read_header(self): + f = self._file + TransferSyntaxUID = None + + # Read all elements, store transferSyntax when we encounter it + try: + while True: + fp_save = f.tell() + # Get element + group, element, value = self._readDataElement() + if group == 0x02: + if group == 0x02 and element == 0x10: + TransferSyntaxUID = value.decode("ascii").strip("\x00") + else: + # No more group 2: rewind and break + # (don't trust group length) + f.seek(fp_save) + break + except (EOFError, struct.error): # pragma: no cover + raise RuntimeError("End of file reached while still in header.") + + # Handle transfer syntax + self._info["TransferSyntaxUID"] = TransferSyntaxUID + # + if TransferSyntaxUID is None: + # Assume ExplicitVRLittleEndian + is_implicit_VR, is_little_endian = False, True + elif TransferSyntaxUID == 
"1.2.840.10008.1.2.1": + # ExplicitVRLittleEndian + is_implicit_VR, is_little_endian = False, True + elif TransferSyntaxUID == "1.2.840.10008.1.2.2": + # ExplicitVRBigEndian + is_implicit_VR, is_little_endian = False, False + elif TransferSyntaxUID == "1.2.840.10008.1.2": + # implicit VR little endian + is_implicit_VR, is_little_endian = True, True + elif TransferSyntaxUID == "1.2.840.10008.1.2.1.99": + # DeflatedExplicitVRLittleEndian: + is_implicit_VR, is_little_endian = False, True + self._inflate() + else: + # http://www.dicomlibrary.com/dicom/transfer-syntax/ + t, extra_info = TransferSyntaxUID, "" + if "1.2.840.10008.1.2.4.50" <= t < "1.2.840.10008.1.2.4.99": + extra_info = " (JPEG)" + if "1.2.840.10008.1.2.4.90" <= t < "1.2.840.10008.1.2.4.99": + extra_info = " (JPEG 2000)" + if t == "1.2.840.10008.1.2.5": + extra_info = " (RLE)" + if t == "1.2.840.10008.1.2.6.1": + extra_info = " (RFC 2557)" + raise CompressedDicom( + "The dicom reader can only read files with " + "uncompressed image data - not %r%s. You " + "can try using dcmtk or gdcm to convert the " + "image." % (t, extra_info) + ) + + # From hereon, use implicit/explicit big/little endian + self.is_implicit_VR = is_implicit_VR + self.is_little_endian = is_little_endian + self._unpackPrefix = "><"[is_little_endian] + + def _read_data_elements(self): + info = self._info + try: + while True: + # Get element + group, element, value = self._readDataElement() + # Is it a group we are interested in? + if group in GROUPS: + key = (group, element) + name, vr = MINIDICT.get(key, (None, None)) + # Is it an element we are interested in? + if name: + # Store value + converter = self._converters.get(vr, lambda x: x) + info[name] = converter(value) + except (EOFError, struct.error): + pass # end of file ... + + def get_numpy_array(self): + """Get numpy arra for this DICOM file, with the correct shape, + and pixel values scaled appropriately. + """ + # Is there pixel data at all? 
+ if "PixelData" not in self: + raise TypeError("No pixel data found in this dataset.") + + # Load it now if it was not already loaded + if self._pixel_data_loc and len(self.PixelData) < 100: + # Reopen file? + close_file = False + if self._file is None: + close_file = True + self._file = open(self._filename, "rb") + # Read data + self._file.seek(self._pixel_data_loc[0]) + if self._pixel_data_loc[1] == 0xFFFFFFFF: + value = self._read_undefined_length_value() + else: + value = self._file.read(self._pixel_data_loc[1]) + # Close file + if close_file: + self._file.close() + self._file = None + # Overwrite + self._info["PixelData"] = value + + # Get data + data = self._pixel_data_numpy() + data = self._apply_slope_and_offset(data) + + # Remove data again to preserve memory + # Note that the data for the original file is loaded twice ... + self._info["PixelData"] = ( + b"Data converted to numpy array, " + b"raw data removed to preserve memory" + ) + return data + + def _get_shape_and_sampling(self): + """Get shape and sampling without actuall using the pixel data. + In this way, the user can get an idea what's inside without having + to load it. 
+ """ + # Get shape (in the same way that pydicom does) + if "NumberOfFrames" in self and self.NumberOfFrames > 1: + if self.SamplesPerPixel > 1: + shape = ( + self.SamplesPerPixel, + self.NumberOfFrames, + self.Rows, + self.Columns, + ) + else: + shape = self.NumberOfFrames, self.Rows, self.Columns + elif "SamplesPerPixel" in self: + if self.SamplesPerPixel > 1: + if self.BitsAllocated == 8: + shape = self.SamplesPerPixel, self.Rows, self.Columns + else: + raise NotImplementedError( + "DICOM plugin only handles " + "SamplesPerPixel > 1 if Bits " + "Allocated = 8" + ) + else: + shape = self.Rows, self.Columns + else: + raise RuntimeError( + "DICOM file has no SamplesPerPixel " "(perhaps this is a report?)" + ) + + # Try getting sampling between pixels + if "PixelSpacing" in self: + sampling = float(self.PixelSpacing[0]), float(self.PixelSpacing[1]) + else: + sampling = 1.0, 1.0 + if "SliceSpacing" in self: + sampling = (abs(self.SliceSpacing),) + sampling + + # Ensure that sampling has as many elements as shape + sampling = (1.0,) * (len(shape) - len(sampling)) + sampling[-len(shape) :] + + # Set shape and sampling + self._info["shape"] = shape + self._info["sampling"] = sampling + + def _pixel_data_numpy(self): + """Return a NumPy array of the pixel data.""" + # Taken from pydicom + # Copyright (c) 2008-2012 Darcy Mason + + if "PixelData" not in self: + raise TypeError("No pixel data found in this dataset.") + + # determine the type used for the array + need_byteswap = self.is_little_endian != sys_is_little_endian + + # Make NumPy format code, e.g. 
"uint16", "int32" etc + # from two pieces of info: + # self.PixelRepresentation -- 0 for unsigned, 1 for signed; + # self.BitsAllocated -- 8, 16, or 32 + format_str = "%sint%d" % ( + ("u", "")[self.PixelRepresentation], + self.BitsAllocated, + ) + try: + numpy_format = np.dtype(format_str) + except TypeError: # pragma: no cover + raise TypeError( + "Data type not understood by NumPy: format='%s', " + " PixelRepresentation=%d, BitsAllocated=%d" + % (numpy_format, self.PixelRepresentation, self.BitsAllocated) + ) + + # Have correct Numpy format, so create the NumPy array + arr = np.frombuffer(self.PixelData, numpy_format).copy() + + # XXX byte swap - may later handle this in read_file!!? + if need_byteswap: + arr.byteswap(True) # True means swap in-place, don't make new copy + + # Note the following reshape operations return a new *view* onto arr, + # but don't copy the data + arr = arr.reshape(*self._info["shape"]) + return arr + + def _apply_slope_and_offset(self, data): + """ + If RescaleSlope and RescaleIntercept are present in the data, + apply them. The data type of the data is changed if necessary. + """ + # Obtain slope and offset + slope, offset = 1, 0 + needFloats, needApplySlopeOffset = False, False + if "RescaleSlope" in self: + needApplySlopeOffset = True + slope = self.RescaleSlope + if "RescaleIntercept" in self: + needApplySlopeOffset = True + offset = self.RescaleIntercept + if int(slope) != slope or int(offset) != offset: + needFloats = True + if not needFloats: + slope, offset = int(slope), int(offset) + + # Apply slope and offset + if needApplySlopeOffset: + # Maybe we need to change the datatype? 
+ if data.dtype in [np.float32, np.float64]: + pass + elif needFloats: + data = data.astype(np.float32) + else: + # Determine required range + minReq, maxReq = data.min().item(), data.max().item() + minReq = min([minReq, minReq * slope + offset, maxReq * slope + offset]) + maxReq = max([maxReq, minReq * slope + offset, maxReq * slope + offset]) + + # Determine required datatype from that + dtype = None + if minReq < 0: + # Signed integer type + maxReq = max([-minReq, maxReq]) + if maxReq < 2**7: + dtype = np.int8 + elif maxReq < 2**15: + dtype = np.int16 + elif maxReq < 2**31: + dtype = np.int32 + else: + dtype = np.float32 + else: + # Unsigned integer type + if maxReq < 2**8: + dtype = np.int8 + elif maxReq < 2**16: + dtype = np.int16 + elif maxReq < 2**32: + dtype = np.int32 + else: + dtype = np.float32 + # Change datatype + if dtype != data.dtype: + data = data.astype(dtype) + + # Apply slope and offset + data *= slope + data += offset + + # Done + return data + + def _inflate(self): + # Taken from pydicom + # Copyright (c) 2008-2012 Darcy Mason + import zlib + from io import BytesIO + + # See PS3.6-2008 A.5 (p 71) -- when written, the entire dataset + # following the file metadata was prepared the normal way, + # then "deflate" compression applied. + # All that is needed here is to decompress and then + # use as normal in a file-like object + zipped = self._file.read() + # -MAX_WBITS part is from comp.lang.python answer: + # groups.google.com/group/comp.lang.python/msg/e95b3b38a71e6799 + unzipped = zlib.decompress(zipped, -zlib.MAX_WBITS) + self._file = BytesIO(unzipped) # a file-like object + + +class DicomSeries(object): + """DicomSeries + This class represents a serie of dicom files (SimpleDicomReader + objects) that belong together. If these are multiple files, they + represent the slices of a volume (like for CT or MRI). 
+ """ + + def __init__(self, suid, progressIndicator): + # Init dataset list and the callback + self._entries = [] + + # Init props + self._suid = suid + self._info = {} + self._progressIndicator = progressIndicator + + def __len__(self): + return len(self._entries) + + def __iter__(self): + return iter(self._entries) + + def __getitem__(self, index): + return self._entries[index] + + @property + def suid(self): + return self._suid + + @property + def shape(self): + """The shape of the data (nz, ny, nx).""" + return self._info["shape"] + + @property + def sampling(self): + """The sampling (voxel distances) of the data (dz, dy, dx).""" + return self._info["sampling"] + + @property + def info(self): + """A dictionary containing the information as present in the + first dicomfile of this serie. None if there are no entries.""" + return self._info + + @property + def description(self): + """A description of the dicom series. Used fields are + PatientName, shape of the data, SeriesDescription, and + ImageComments. + """ + info = self.info + + # If no info available, return simple description + if not info: # pragma: no cover + return "DicomSeries containing %i images" % len(self) + + fields = [] + # Give patient name + if "PatientName" in info: + fields.append("" + info["PatientName"]) + # Also add dimensions + if self.shape: + tmp = [str(d) for d in self.shape] + fields.append("x".join(tmp)) + # Try adding more fields + if "SeriesDescription" in info: + fields.append("'" + info["SeriesDescription"] + "'") + if "ImageComments" in info: + fields.append("'" + info["ImageComments"] + "'") + + # Combine + return " ".join(fields) + + def __repr__(self): + adr = hex(id(self)).upper() + return "" % (len(self), adr) + + def get_numpy_array(self): + """Get (load) the data that this DicomSeries represents, and return + it as a numpy array. If this serie contains multiple images, the + resulting array is 3D, otherwise it's 2D. 
+ """ + + # It's easy if no file or if just a single file + if len(self) == 0: + raise ValueError("Serie does not contain any files.") + elif len(self) == 1: + return self[0].get_numpy_array() + + # Check info + if self.info is None: + raise RuntimeError("Cannot return volume if series not finished.") + + # Init data (using what the dicom packaged produces as a reference) + slice = self[0].get_numpy_array() + vol = np.zeros(self.shape, dtype=slice.dtype) + vol[0] = slice + + # Fill volume + self._progressIndicator.start("loading data", "", len(self)) + for z in range(1, len(self)): + vol[z] = self[z].get_numpy_array() + self._progressIndicator.set_progress(z + 1) + self._progressIndicator.finish() + + # Done + import gc + + gc.collect() + return vol + + def _append(self, dcm): + self._entries.append(dcm) + + def _sort(self): + self._entries.sort( + key=lambda k: ( + k.InstanceNumber, + ( + k.ImagePositionPatient[2] + if hasattr(k, "ImagePositionPatient") + else None + ), + ) + ) + + def _finish(self): + """ + Evaluate the series of dicom files. Together they should make up + a volumetric dataset. This means the files should meet certain + conditions. Also some additional information has to be calculated, + such as the distance between the slices. This method sets the + attributes for "shape", "sampling" and "info". 
def list_files(files, path):
    """Recursively collect all file paths under *path* into *files*.

    Directories are descended into; entries that are neither regular
    files nor directories (e.g. broken links) are skipped.
    """
    for name in os.listdir(path):
        full = os.path.join(path, name)
        if os.path.isdir(full):
            list_files(files, full)
        elif os.path.isfile(full):
            files.append(full)
def splitSerieIfRequired(serie, series, progressIndicator):
    """
    Split the serie in multiple series if this is required. The choice
    is based on examining the image position relative to the previous
    image. If it differs too much, it is assumed that there is a new
    dataset. This can happen for example in unsplit gated CT data.

    Mutates *series* in place: when a split happens, *serie* is removed
    and the resulting sub-series are inserted at its position.
    """

    # Sort the original list and get local name
    serie._sort()
    L = serie._entries
    # Init previous slice
    ds1 = L[0]
    # Check whether we can do this (needs a z-position per slice)
    if "ImagePositionPatient" not in ds1:
        return
    # Initialize a list of new lists; the first slice seeds the first group
    L2 = [[ds1]]
    # Init slice distance estimate (0 means "no estimate yet")
    distance = 0

    for index in range(1, len(L)):
        # Get current slice
        ds2 = L[index]
        # Get positions (z coordinate of ImagePositionPatient)
        pos1 = float(ds1.ImagePositionPatient[2])
        pos2 = float(ds2.ImagePositionPatient[2])
        # Get distances
        newDist = abs(pos1 - pos2)
        # deltaDist = abs(firstPos-pos2)
        # If the distance deviates more than 2x from what we've seen,
        # we can agree it's a new dataset.
        if distance and newDist > 2.1 * distance:
            # Start a new group; reset the distance estimate for it
            L2.append([])
            distance = 0
        else:
            # Test missing file (gap of ~2 slices within the same group)
            if distance and newDist > 1.5 * distance:
                progressIndicator.write(
                    "Warning: missing file after %r" % ds1._filename
                )
            distance = newDist
        # Add to last list
        L2[-1].append(ds2)
        # Store previous
        ds1 = ds2

    # Split if we should
    if len(L2) > 1:
        # At what position are we now?
        i = series.index(serie)
        # Create new series
        series2insert = []
        for L in L2:
            newSerie = DicomSeries(serie.suid, progressIndicator)
            newSerie._entries = L
            series2insert.append(newSerie)
        # Insert series and remove self (reversed keeps the groups in order)
        for newSerie in reversed(series2insert):
            series.insert(i, newSerie)
        series.remove(serie)
def download(directory=None, force_download=False):
    """Download the FreeImage library to your computer.

    Parameters
    ----------
    directory : str | None
        The directory where the file will be cached if a download was
        required to obtain the file. By default, the appdata directory
        is used. This is also the first directory that is checked for
        a local version of the file.
    force_download : bool | str
        If True, the file will be downloaded even if a local copy exists
        (and this copy will be overwritten). Can also be a YYYY-MM-DD date
        to ensure a file is up-to-date (modified date of a file on disk,
        if present, is checked).
    """
    plat = get_platform()
    # Only platforms with a known prebuilt binary can be downloaded
    if plat and plat in FNAME_PER_PLATFORM:
        fname = "freeimage/" + FNAME_PER_PLATFORM[plat]
        get_remote_file(fname=fname, directory=directory, force_download=force_download)
        # NOTE(review): ``fi`` is presumably the module-level Freeimage
        # singleton defined elsewhere in this module; clearing its handle
        # forces a reload on next use — confirm.
        fi._lib = None  # allow trying again (needed to make tests work)
def efn(x):
    """Encode filename *x* to bytes using the current filesystem encoding."""
    fs_encoding = sys.getfilesystemencoding()
    return x.encode(fs_encoding)
FIT_FLOAT: [], + FIT_DOUBLE: [], + FIT_COMPLEX: [], + FIT_RGB16: [3], + FIT_RGBA16: [4], + FIT_RGBF: [3], + FIT_RGBAF: [4], + } + + +class IO_FLAGS(object): + FIF_LOAD_NOPIXELS = 0x8000 # loading: load the image header only + # # (not supported by all plugins) + BMP_DEFAULT = 0 + BMP_SAVE_RLE = 1 + CUT_DEFAULT = 0 + DDS_DEFAULT = 0 + EXR_DEFAULT = 0 # save data as half with piz-based wavelet compression + EXR_FLOAT = 0x0001 # save data as float instead of half (not recommended) + EXR_NONE = 0x0002 # save with no compression + EXR_ZIP = 0x0004 # save with zlib compression, in blocks of 16 scan lines + EXR_PIZ = 0x0008 # save with piz-based wavelet compression + EXR_PXR24 = 0x0010 # save with lossy 24-bit float compression + EXR_B44 = 0x0020 # save with lossy 44% float compression + # # - goes to 22% when combined with EXR_LC + EXR_LC = 0x0040 # save images with one luminance and two chroma channels, + # # rather than as RGB (lossy compression) + FAXG3_DEFAULT = 0 + GIF_DEFAULT = 0 + GIF_LOAD256 = 1 # Load the image as a 256 color image with ununsed + # # palette entries, if it's 16 or 2 color + GIF_PLAYBACK = 2 # 'Play' the GIF to generate each frame (as 32bpp) + # # instead of returning raw frame data when loading + HDR_DEFAULT = 0 + ICO_DEFAULT = 0 + ICO_MAKEALPHA = 1 # convert to 32bpp and create an alpha channel from the + # # AND-mask when loading + IFF_DEFAULT = 0 + J2K_DEFAULT = 0 # save with a 16:1 rate + JP2_DEFAULT = 0 # save with a 16:1 rate + JPEG_DEFAULT = 0 # loading (see JPEG_FAST); + # # saving (see JPEG_QUALITYGOOD|JPEG_SUBSAMPLING_420) + JPEG_FAST = 0x0001 # load the file as fast as possible, + # # sacrificing some quality + JPEG_ACCURATE = 0x0002 # load the file with the best quality, + # # sacrificing some speed + JPEG_CMYK = 0x0004 # load separated CMYK "as is" + # # (use | to combine with other load flags) + JPEG_EXIFROTATE = 0x0008 # load and rotate according to + # # Exif 'Orientation' tag if available + JPEG_QUALITYSUPERB = 0x80 # save with 
superb quality (100:1) + JPEG_QUALITYGOOD = 0x0100 # save with good quality (75:1) + JPEG_QUALITYNORMAL = 0x0200 # save with normal quality (50:1) + JPEG_QUALITYAVERAGE = 0x0400 # save with average quality (25:1) + JPEG_QUALITYBAD = 0x0800 # save with bad quality (10:1) + JPEG_PROGRESSIVE = 0x2000 # save as a progressive-JPEG + # # (use | to combine with other save flags) + JPEG_SUBSAMPLING_411 = 0x1000 # save with high 4x1 chroma + # # subsampling (4:1:1) + JPEG_SUBSAMPLING_420 = 0x4000 # save with medium 2x2 medium chroma + # # subsampling (4:2:0) - default value + JPEG_SUBSAMPLING_422 = 0x8000 # save /w low 2x1 chroma subsampling (4:2:2) + JPEG_SUBSAMPLING_444 = 0x10000 # save with no chroma subsampling (4:4:4) + JPEG_OPTIMIZE = 0x20000 # on saving, compute optimal Huffman coding tables + # # (can reduce a few percent of file size) + JPEG_BASELINE = 0x40000 # save basic JPEG, without metadata or any markers + KOALA_DEFAULT = 0 + LBM_DEFAULT = 0 + MNG_DEFAULT = 0 + PCD_DEFAULT = 0 + PCD_BASE = 1 # load the bitmap sized 768 x 512 + PCD_BASEDIV4 = 2 # load the bitmap sized 384 x 256 + PCD_BASEDIV16 = 3 # load the bitmap sized 192 x 128 + PCX_DEFAULT = 0 + PFM_DEFAULT = 0 + PICT_DEFAULT = 0 + PNG_DEFAULT = 0 + PNG_IGNOREGAMMA = 1 # loading: avoid gamma correction + PNG_Z_BEST_SPEED = 0x0001 # save using ZLib level 1 compression flag + # # (default value is 6) + PNG_Z_DEFAULT_COMPRESSION = 0x0006 # save using ZLib level 6 compression + # # flag (default recommended value) + PNG_Z_BEST_COMPRESSION = 0x0009 # save using ZLib level 9 compression flag + # # (default value is 6) + PNG_Z_NO_COMPRESSION = 0x0100 # save without ZLib compression + PNG_INTERLACED = 0x0200 # save using Adam7 interlacing (use | to combine + # # with other save flags) + PNM_DEFAULT = 0 + PNM_SAVE_RAW = 0 # Writer saves in RAW format (i.e. P4, P5 or P6) + PNM_SAVE_ASCII = 1 # Writer saves in ASCII format (i.e. 
class METADATA_MODELS(object):
    """Metadata model identifiers used when iterating image metadata.

    These mirror FreeImage's ``FREE_IMAGE_MDMODEL`` enumeration; the
    integer values are part of the FreeImage ABI and must not change.
    """

    FIMD_COMMENTS = 0
    FIMD_EXIF_MAIN = 1
    FIMD_EXIF_EXIF = 2
    FIMD_EXIF_GPS = 3
    FIMD_EXIF_MAKERNOTE = 4
    FIMD_EXIF_INTEROP = 5
    FIMD_IPTC = 6
    FIMD_XMP = 7
    FIMD_GEOTIFF = 8
    FIMD_ANIMATION = 9
FIDT_FLOAT = 11 # 32-bit IEEE floating point + FIDT_DOUBLE = 12 # 64-bit IEEE floating point + FIDT_IFD = 13 # 32-bit unsigned integer (offset) + FIDT_PALETTE = 14 # 32-bit RGBQUAD + FIDT_LONG8 = 16 # 64-bit unsigned integer + FIDT_SLONG8 = 17 # 64-bit signed integer + FIDT_IFD8 = 18 # 64-bit unsigned integer (offset) + + dtypes = { + FIDT_BYTE: numpy.uint8, + FIDT_SHORT: numpy.uint16, + FIDT_LONG: numpy.uint32, + FIDT_RATIONAL: [("numerator", numpy.uint32), ("denominator", numpy.uint32)], + FIDT_LONG8: numpy.uint64, + FIDT_SLONG8: numpy.int64, + FIDT_IFD8: numpy.uint64, + FIDT_SBYTE: numpy.int8, + FIDT_UNDEFINED: numpy.uint8, + FIDT_SSHORT: numpy.int16, + FIDT_SLONG: numpy.int32, + FIDT_SRATIONAL: [("numerator", numpy.int32), ("denominator", numpy.int32)], + FIDT_FLOAT: numpy.float32, + FIDT_DOUBLE: numpy.float64, + FIDT_IFD: numpy.uint32, + FIDT_PALETTE: [ + ("R", numpy.uint8), + ("G", numpy.uint8), + ("B", numpy.uint8), + ("A", numpy.uint8), + ], + } + + +class Freeimage(object): + """Class to represent an interface to the FreeImage library. + This class is relatively thin. It provides a Pythonic API that converts + Freeimage objects to Python objects, but that's about it. + The actual implementation should be provided by the plugins. + + The recommended way to call into the Freeimage library (so that + errors and warnings show up in the right moment) is to use this + object as a context manager: + with imageio.fi as lib: + lib.FreeImage_GetPalette() + + """ + + _API = { + # All we're doing here is telling ctypes that some of the + # FreeImage functions return pointers instead of integers. (On + # 64-bit systems, without this information the pointers get + # truncated and crashes result). There's no need to list + # functions that return ints, or the types of the parameters + # to these or other functions -- that's fine to do implicitly. + # Note that the ctypes immediately converts the returned void_p + # back to a python int again! 
This is really not helpful, + # because then passing it back to another library call will + # cause truncation-to-32-bits on 64-bit systems. Thanks, ctypes! + # So after these calls one must immediately re-wrap the int as + # a c_void_p if it is to be passed back into FreeImage. + "FreeImage_AllocateT": (ctypes.c_void_p, None), + "FreeImage_FindFirstMetadata": (ctypes.c_void_p, None), + "FreeImage_GetBits": (ctypes.c_void_p, None), + "FreeImage_GetPalette": (ctypes.c_void_p, None), + "FreeImage_GetTagKey": (ctypes.c_char_p, None), + "FreeImage_GetTagValue": (ctypes.c_void_p, None), + "FreeImage_CreateTag": (ctypes.c_void_p, None), + "FreeImage_Save": (ctypes.c_void_p, None), + "FreeImage_Load": (ctypes.c_void_p, None), + "FreeImage_LoadFromMemory": (ctypes.c_void_p, None), + "FreeImage_OpenMultiBitmap": (ctypes.c_void_p, None), + "FreeImage_LoadMultiBitmapFromMemory": (ctypes.c_void_p, None), + "FreeImage_LockPage": (ctypes.c_void_p, None), + "FreeImage_OpenMemory": (ctypes.c_void_p, None), + # 'FreeImage_ReadMemory': (ctypes.c_void_p, None), + # 'FreeImage_CloseMemory': (ctypes.c_void_p, None), + "FreeImage_GetVersion": (ctypes.c_char_p, None), + "FreeImage_GetFIFExtensionList": (ctypes.c_char_p, None), + "FreeImage_GetFormatFromFIF": (ctypes.c_char_p, None), + "FreeImage_GetFIFDescription": (ctypes.c_char_p, None), + "FreeImage_ColorQuantizeEx": (ctypes.c_void_p, None), + # Pypy wants some extra definitions, so here we go ... 
+ "FreeImage_IsLittleEndian": (ctypes.c_int, None), + "FreeImage_SetOutputMessage": (ctypes.c_void_p, None), + "FreeImage_GetFIFCount": (ctypes.c_int, None), + "FreeImage_IsPluginEnabled": (ctypes.c_int, None), + "FreeImage_GetFileType": (ctypes.c_int, None), + # + "FreeImage_GetTagType": (ctypes.c_int, None), + "FreeImage_GetTagLength": (ctypes.c_int, None), + "FreeImage_FindNextMetadata": (ctypes.c_int, None), + "FreeImage_FindCloseMetadata": (ctypes.c_void_p, None), + # + "FreeImage_GetFIFFromFilename": (ctypes.c_int, None), + "FreeImage_FIFSupportsReading": (ctypes.c_int, None), + "FreeImage_FIFSupportsWriting": (ctypes.c_int, None), + "FreeImage_FIFSupportsExportType": (ctypes.c_int, None), + "FreeImage_FIFSupportsExportBPP": (ctypes.c_int, None), + "FreeImage_GetHeight": (ctypes.c_int, None), + "FreeImage_GetWidth": (ctypes.c_int, None), + "FreeImage_GetImageType": (ctypes.c_int, None), + "FreeImage_GetBPP": (ctypes.c_int, None), + "FreeImage_GetColorsUsed": (ctypes.c_int, None), + "FreeImage_ConvertTo32Bits": (ctypes.c_void_p, None), + "FreeImage_GetPitch": (ctypes.c_int, None), + "FreeImage_Unload": (ctypes.c_void_p, None), + } + + def __init__(self): + # Initialize freeimage lib as None + self._lib = None + + # A lock to create thread-safety + self._lock = threading.RLock() + + # Init log messages lists + self._messages = [] + + # Select functype for error handler + if sys.platform.startswith("win"): + functype = ctypes.WINFUNCTYPE + else: + functype = ctypes.CFUNCTYPE + + # Create output message handler + @functype(None, ctypes.c_int, ctypes.c_char_p) + def error_handler(fif, message): + message = message.decode("utf-8") + self._messages.append(message) + while (len(self._messages)) > 256: + self._messages.pop(0) + + # Make sure to keep a ref to function + self._error_handler = error_handler + + @property + def lib(self): + if self._lib is None: + try: + self.load_freeimage() + except OSError as err: + self._lib = "The freeimage library could not be 
loaded: " + self._lib += str(err) + if isinstance(self._lib, str): + raise RuntimeError(self._lib) + return self._lib + + def has_lib(self): + try: + self.lib + except Exception: + return False + return True + + def load_freeimage(self): + """Try to load the freeimage lib from the system. If not successful, + try to download the imageio version and try again. + """ + # Load library and register API + success = False + try: + # Try without forcing a download, but giving preference + # to the imageio-provided lib (if previously downloaded) + self._load_freeimage() + self._register_api() + if self.lib.FreeImage_GetVersion().decode("utf-8") >= "3.15": + success = True + except OSError: + pass + + if not success: + # Ensure we have our own lib, try again + get_freeimage_lib() + self._load_freeimage() + self._register_api() + + # Wrap up + self.lib.FreeImage_SetOutputMessage(self._error_handler) + self.lib_version = self.lib.FreeImage_GetVersion().decode("utf-8") + + def _load_freeimage(self): + # Define names + lib_names = ["freeimage", "libfreeimage"] + exact_lib_names = [ + "FreeImage", + "libfreeimage.dylib", + "libfreeimage.so", + "libfreeimage.so.3", + ] + # Add names of libraries that we provide (that file may not exist) + res_dirs = resource_dirs() + plat = get_platform() + if plat: # Can be None on e.g. FreeBSD + fname = FNAME_PER_PLATFORM[plat] + for dir in res_dirs: + exact_lib_names.insert(0, os.path.join(dir, "freeimage", fname)) + + # Add the path specified with IMAGEIO_FREEIMAGE_LIB: + lib = os.getenv("IMAGEIO_FREEIMAGE_LIB", None) + if lib is not None: + exact_lib_names.insert(0, lib) + + # Load + try: + lib, fname = load_lib(exact_lib_names, lib_names, res_dirs) + except OSError as err: # pragma: no cover + err_msg = str(err) + "\nPlease install the FreeImage library." 
+ raise OSError(err_msg) + + # Store + self._lib = lib + self.lib_fname = fname + + def _register_api(self): + # Albert's ctypes pattern + for f, (restype, argtypes) in self._API.items(): + func = getattr(self.lib, f) + func.restype = restype + func.argtypes = argtypes + + # Handling of output messages + + def __enter__(self): + self._lock.acquire() + return self.lib + + def __exit__(self, *args): + self._show_any_warnings() + self._lock.release() + + def _reset_log(self): + """Reset the list of output messages. Call this before + loading or saving an image with the FreeImage API. + """ + self._messages = [] + + def _get_error_message(self): + """Get the output messages produced since the last reset as + one string. Returns 'No known reason.' if there are no messages. + Also resets the log. + """ + if self._messages: + res = " ".join(self._messages) + self._reset_log() + return res + else: + return "No known reason." + + def _show_any_warnings(self): + """If there were any messages since the last reset, show them + as a warning. Otherwise do nothing. Also resets the messages. + """ + if self._messages: + logger.warning("imageio.freeimage warning: " + self._get_error_message()) + self._reset_log() + + def get_output_log(self): + """Return a list of the last 256 output messages + (warnings and errors) produced by the FreeImage library. + """ + # This message log is not cleared/reset, but kept to 256 elements. + return [m for m in self._messages] + + def getFIF(self, filename, mode, bb=None): + """Get the freeimage Format (FIF) from a given filename. + If mode is 'r', will try to determine the format by reading + the file, otherwise only the filename is used. + + This function also tests whether the format supports reading/writing. + """ + with self as lib: + # Init + ftype = -1 + if mode not in "rw": + raise ValueError('Invalid mode (must be "r" or "w").') + + # Try getting format from the content. 
Note that some files + # do not have a header that allows reading the format from + # the file. + if mode == "r": + if bb is not None: + fimemory = lib.FreeImage_OpenMemory(ctypes.c_char_p(bb), len(bb)) + ftype = lib.FreeImage_GetFileTypeFromMemory( + ctypes.c_void_p(fimemory), len(bb) + ) + lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory)) + if (ftype == -1) and os.path.isfile(filename): + ftype = lib.FreeImage_GetFileType(efn(filename), 0) + # Try getting the format from the extension + if ftype == -1: + ftype = lib.FreeImage_GetFIFFromFilename(efn(filename)) + + # Test if ok + if ftype == -1: + raise ValueError('Cannot determine format of file "%s"' % filename) + elif mode == "w" and not lib.FreeImage_FIFSupportsWriting(ftype): + raise ValueError('Cannot write the format of file "%s"' % filename) + elif mode == "r" and not lib.FreeImage_FIFSupportsReading(ftype): + raise ValueError('Cannot read the format of file "%s"' % filename) + return ftype + + def create_bitmap(self, filename, ftype, flags=0): + """create_bitmap(filename, ftype, flags=0) + Create a wrapped bitmap object. + """ + return FIBitmap(self, filename, ftype, flags) + + def create_multipage_bitmap(self, filename, ftype, flags=0): + """create_multipage_bitmap(filename, ftype, flags=0) + Create a wrapped multipage bitmap object. 
+ """ + return FIMultipageBitmap(self, filename, ftype, flags) + + +class FIBaseBitmap(object): + def __init__(self, fi, filename, ftype, flags): + self._fi = fi + self._filename = filename + self._ftype = ftype + self._flags = flags + self._bitmap = None + self._close_funcs = [] + + def __del__(self): + self.close() + + def close(self): + if (self._bitmap is not None) and self._close_funcs: + for close_func in self._close_funcs: + try: + with self._fi: + fun = close_func[0] + fun(*close_func[1:]) + except Exception: # pragma: no cover + pass + self._close_funcs = [] + self._bitmap = None + + def _set_bitmap(self, bitmap, close_func=None): + """Function to set the bitmap and specify the function to unload it.""" + if self._bitmap is not None: + pass # bitmap is converted + if close_func is None: + close_func = self._fi.lib.FreeImage_Unload, bitmap + + self._bitmap = bitmap + if close_func: + self._close_funcs.append(close_func) + + def get_meta_data(self): + # todo: there is also FreeImage_TagToString, is that useful? + # and would that work well when reading and then saving? 
+ + # Create a list of (model_name, number) tuples + models = [ + (name[5:], number) + for name, number in METADATA_MODELS.__dict__.items() + if name.startswith("FIMD_") + ] + + # Prepare + metadata = Dict() + tag = ctypes.c_void_p() + + with self._fi as lib: + # Iterate over all FreeImage meta models + for model_name, number in models: + # Find beginning, get search handle + mdhandle = lib.FreeImage_FindFirstMetadata( + number, self._bitmap, ctypes.byref(tag) + ) + mdhandle = ctypes.c_void_p(mdhandle) + if mdhandle: + # Iterate over all tags in this model + more = True + while more: + # Get info about tag + tag_name = lib.FreeImage_GetTagKey(tag).decode("utf-8") + tag_type = lib.FreeImage_GetTagType(tag) + byte_size = lib.FreeImage_GetTagLength(tag) + char_ptr = ctypes.c_char * byte_size + data = char_ptr.from_address(lib.FreeImage_GetTagValue(tag)) + # Convert in a way compatible with Pypy + tag_bytes = bytes(bytearray(data)) + # The default value is the raw bytes + tag_val = tag_bytes + # Convert to a Python value in the metadata dict + if tag_type == METADATA_DATATYPE.FIDT_ASCII: + tag_val = tag_bytes.decode("utf-8", "replace") + elif tag_type in METADATA_DATATYPE.dtypes: + dtype = METADATA_DATATYPE.dtypes[tag_type] + if IS_PYPY and isinstance(dtype, (list, tuple)): + pass # pragma: no cover - or we get a segfault + else: + try: + tag_val = numpy.frombuffer( + tag_bytes, dtype=dtype + ).copy() + if len(tag_val) == 1: + tag_val = tag_val[0] + except Exception: # pragma: no cover + pass + # Store data in dict + subdict = metadata.setdefault(model_name, Dict()) + subdict[tag_name] = tag_val + # Next + more = lib.FreeImage_FindNextMetadata( + mdhandle, ctypes.byref(tag) + ) + + # Close search handle for current meta model + lib.FreeImage_FindCloseMetadata(mdhandle) + + # Done + return metadata + + def set_meta_data(self, metadata): + # Create a dict mapping model_name to number + models = {} + for name, number in METADATA_MODELS.__dict__.items(): + if 
name.startswith("FIMD_"): + models[name[5:]] = number + + # Create a mapping from numpy.dtype to METADATA_DATATYPE + def get_tag_type_number(dtype): + for number, numpy_dtype in METADATA_DATATYPE.dtypes.items(): + if dtype == numpy_dtype: + return number + else: + return None + + with self._fi as lib: + for model_name, subdict in metadata.items(): + # Get model number + number = models.get(model_name, None) + if number is None: + continue # Unknown model, silent ignore + + for tag_name, tag_val in subdict.items(): + # Create new tag + tag = lib.FreeImage_CreateTag() + tag = ctypes.c_void_p(tag) + + try: + # Convert Python value to FI type, val + is_ascii = False + if isinstance(tag_val, str): + try: + tag_bytes = tag_val.encode("ascii") + is_ascii = True + except UnicodeError: + pass + if is_ascii: + tag_type = METADATA_DATATYPE.FIDT_ASCII + tag_count = len(tag_bytes) + else: + if not hasattr(tag_val, "dtype"): + tag_val = numpy.array([tag_val]) + tag_type = get_tag_type_number(tag_val.dtype) + if tag_type is None: + logger.warning( + "imageio.freeimage warning: Could not " + "determine tag type of %r." 
% tag_name + ) + continue + tag_bytes = tag_val.tobytes() + tag_count = tag_val.size + # Set properties + lib.FreeImage_SetTagKey(tag, tag_name.encode("utf-8")) + lib.FreeImage_SetTagType(tag, tag_type) + lib.FreeImage_SetTagCount(tag, tag_count) + lib.FreeImage_SetTagLength(tag, len(tag_bytes)) + lib.FreeImage_SetTagValue(tag, tag_bytes) + # Store tag + tag_key = lib.FreeImage_GetTagKey(tag) + lib.FreeImage_SetMetadata(number, self._bitmap, tag_key, tag) + + except Exception as err: # pragma: no cover + logger.warning( + "imagio.freeimage warning: Could not set tag " + "%r: %s, %s" + % (tag_name, self._fi._get_error_message(), str(err)) + ) + finally: + lib.FreeImage_DeleteTag(tag) + + +class FIBitmap(FIBaseBitmap): + """Wrapper for the FI bitmap object.""" + + def allocate(self, array): + # Prepare array + assert isinstance(array, numpy.ndarray) + shape = array.shape + dtype = array.dtype + + # Get shape and channel info + r, c = shape[:2] + if len(shape) == 2: + n_channels = 1 + elif len(shape) == 3: + n_channels = shape[2] + else: + n_channels = shape[0] + + # Get fi_type + try: + fi_type = FI_TYPES.fi_types[(dtype.type, n_channels)] + self._fi_type = fi_type + except KeyError: + raise ValueError("Cannot write arrays of given type and shape.") + + # Allocate bitmap + with self._fi as lib: + bpp = 8 * dtype.itemsize * n_channels + bitmap = lib.FreeImage_AllocateT(fi_type, c, r, bpp, 0, 0, 0) + bitmap = ctypes.c_void_p(bitmap) + + # Check and store + if not bitmap: # pragma: no cover + raise RuntimeError( + "Could not allocate bitmap for storage: %s" + % self._fi._get_error_message() + ) + self._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap)) + + def load_from_filename(self, filename=None): + if filename is None: + filename = self._filename + + with self._fi as lib: + # Create bitmap + bitmap = lib.FreeImage_Load(self._ftype, efn(filename), self._flags) + bitmap = ctypes.c_void_p(bitmap) + + # Check and store + if not bitmap: # pragma: no cover + raise 
ValueError( + 'Could not load bitmap "%s": %s' + % (self._filename, self._fi._get_error_message()) + ) + self._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap)) + + # def load_from_bytes(self, bb): + # with self._fi as lib: + # # Create bitmap + # fimemory = lib.FreeImage_OpenMemory( + # ctypes.c_char_p(bb), len(bb)) + # bitmap = lib.FreeImage_LoadFromMemory( + # self._ftype, ctypes.c_void_p(fimemory), self._flags) + # bitmap = ctypes.c_void_p(bitmap) + # lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory)) + # + # # Check + # if not bitmap: + # raise ValueError('Could not load bitmap "%s": %s' + # % (self._filename, self._fi._get_error_message())) + # else: + # self._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap)) + + def save_to_filename(self, filename=None): + if filename is None: + filename = self._filename + + ftype = self._ftype + bitmap = self._bitmap + fi_type = self._fi_type # element type + + with self._fi as lib: + # Check if can write + if fi_type == FI_TYPES.FIT_BITMAP: + can_write = lib.FreeImage_FIFSupportsExportBPP( + ftype, lib.FreeImage_GetBPP(bitmap) + ) + else: + can_write = lib.FreeImage_FIFSupportsExportType(ftype, fi_type) + if not can_write: + raise TypeError("Cannot save image of this format to this file type") + + # Save to file + res = lib.FreeImage_Save(ftype, bitmap, efn(filename), self._flags) + # Check + if res is None: # pragma: no cover, we do so many checks, this is rare + raise RuntimeError( + f"Could not save file `{self._filename}`: {self._fi._get_error_message()}" + ) + + # def save_to_bytes(self): + # ftype = self._ftype + # bitmap = self._bitmap + # fi_type = self._fi_type # element type + # + # with self._fi as lib: + # # Check if can write + # if fi_type == FI_TYPES.FIT_BITMAP: + # can_write = lib.FreeImage_FIFSupportsExportBPP(ftype, + # lib.FreeImage_GetBPP(bitmap)) + # else: + # can_write = lib.FreeImage_FIFSupportsExportType(ftype, fi_type) + # if not can_write: + # raise TypeError('Cannot save image of this format 
' + # 'to this file type') + # + # # Extract the bytes + # fimemory = lib.FreeImage_OpenMemory(0, 0) + # res = lib.FreeImage_SaveToMemory(ftype, bitmap, + # ctypes.c_void_p(fimemory), + # self._flags) + # if res: + # N = lib.FreeImage_TellMemory(ctypes.c_void_p(fimemory)) + # result = ctypes.create_string_buffer(N) + # lib.FreeImage_SeekMemory(ctypes.c_void_p(fimemory), 0) + # lib.FreeImage_ReadMemory(result, 1, N, ctypes.c_void_p(fimemory)) + # result = result.raw + # lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory)) + # + # # Check + # if not res: + # raise RuntimeError('Could not save file "%s": %s' + # % (self._filename, self._fi._get_error_message())) + # + # # Done + # return result + + def get_image_data(self): + dtype, shape, bpp = self._get_type_and_shape() + array = self._wrap_bitmap_bits_in_array(shape, dtype, False) + with self._fi as lib: + isle = lib.FreeImage_IsLittleEndian() + + # swizzle the color components and flip the scanlines to go from + # FreeImage's BGR[A] and upside-down internal memory format to + # something more normal + def n(arr): + # return arr[..., ::-1].T # Does not work on numpypy yet + if arr.ndim == 1: # pragma: no cover + return arr[::-1].T + elif arr.ndim == 2: # Always the case here ... + return arr[:, ::-1].T + elif arr.ndim == 3: # pragma: no cover + return arr[:, :, ::-1].T + elif arr.ndim == 4: # pragma: no cover + return arr[:, :, :, ::-1].T + + if len(shape) == 3 and isle and dtype.type == numpy.uint8: + b = n(array[0]) + g = n(array[1]) + r = n(array[2]) + if shape[0] == 3: + return numpy.dstack((r, g, b)) + elif shape[0] == 4: + a = n(array[3]) + return numpy.dstack((r, g, b, a)) + else: # pragma: no cover - we check this earlier + raise ValueError("Cannot handle images of shape %s" % shape) + + # We need to copy because array does *not* own its memory + # after bitmap is freed. 
+ a = n(array).copy() + return a + + def set_image_data(self, array): + # Prepare array + assert isinstance(array, numpy.ndarray) + shape = array.shape + dtype = array.dtype + with self._fi as lib: + isle = lib.FreeImage_IsLittleEndian() + + # Calculate shape and channels + r, c = shape[:2] + if len(shape) == 2: + n_channels = 1 + w_shape = (c, r) + elif len(shape) == 3: + n_channels = shape[2] + w_shape = (n_channels, c, r) + else: + n_channels = shape[0] + + def n(arr): # normalise to freeimage's in-memory format + return arr[::-1].T + + wrapped_array = self._wrap_bitmap_bits_in_array(w_shape, dtype, True) + # swizzle the color components and flip the scanlines to go to + # FreeImage's BGR[A] and upside-down internal memory format + # The BGR[A] order is only used for 8bits per channel images + # on little endian machines. For everything else RGB[A] is + # used. + if len(shape) == 3 and isle and dtype.type == numpy.uint8: + R = array[:, :, 0] + G = array[:, :, 1] + B = array[:, :, 2] + wrapped_array[0] = n(B) + wrapped_array[1] = n(G) + wrapped_array[2] = n(R) + if shape[2] == 4: + A = array[:, :, 3] + wrapped_array[3] = n(A) + else: + wrapped_array[:] = n(array) + if self._need_finish: + self._finish_wrapped_array(wrapped_array) + + if len(shape) == 2 and dtype.type == numpy.uint8: + with self._fi as lib: + palette = lib.FreeImage_GetPalette(self._bitmap) + palette = ctypes.c_void_p(palette) + if not palette: + raise RuntimeError("Could not get image palette") + try: + palette_data = GREY_PALETTE.ctypes.data + except Exception: # pragma: no cover - IS_PYPY + palette_data = GREY_PALETTE.__array_interface__["data"][0] + ctypes.memmove(palette, palette_data, 1024) + + def _wrap_bitmap_bits_in_array(self, shape, dtype, save): + """Return an ndarray view on the data in a FreeImage bitmap. Only + valid for as long as the bitmap is loaded (if single page) / locked + in memory (if multipage). 
This is used in loading data, but + also during saving, to prepare a strided numpy array buffer. + + """ + # Get bitmap info + with self._fi as lib: + pitch = lib.FreeImage_GetPitch(self._bitmap) + bits = lib.FreeImage_GetBits(self._bitmap) + + # Get more info + height = shape[-1] + byte_size = height * pitch + itemsize = dtype.itemsize + + # Get strides + if len(shape) == 3: + strides = (itemsize, shape[0] * itemsize, pitch) + else: + strides = (itemsize, pitch) + + # Create numpy array and return + data = (ctypes.c_char * byte_size).from_address(bits) + try: + self._need_finish = False + if TEST_NUMPY_NO_STRIDES: + raise NotImplementedError() + return numpy.ndarray(shape, dtype=dtype, buffer=data, strides=strides) + except NotImplementedError: + # IS_PYPY - not very efficient. We create a C-contiguous + # numpy array (because pypy does not support Fortran-order) + # and shape it such that the rest of the code can remain. + if save: + self._need_finish = True # Flag to use _finish_wrapped_array + return numpy.zeros(shape, dtype=dtype) + else: + bb = bytes(bytearray(data)) + array = numpy.frombuffer(bb, dtype=dtype).copy() + # Deal with strides + if len(shape) == 3: + array.shape = shape[2], strides[-1] // shape[0], shape[0] + array2 = array[: shape[2], : shape[1], : shape[0]] + array = numpy.zeros(shape, dtype=array.dtype) + for i in range(shape[0]): + array[i] = array2[:, :, i].T + else: + array.shape = shape[1], strides[-1] + array = array[: shape[1], : shape[0]].T + return array + + def _finish_wrapped_array(self, array): # IS_PYPY + """Hardcore way to inject numpy array in bitmap.""" + # Get bitmap info + with self._fi as lib: + pitch = lib.FreeImage_GetPitch(self._bitmap) + bits = lib.FreeImage_GetBits(self._bitmap) + bpp = lib.FreeImage_GetBPP(self._bitmap) + # Get channels and realwidth + nchannels = bpp // 8 // array.itemsize + realwidth = pitch // nchannels + # Apply padding for pitch if necessary + extra = realwidth - array.shape[-2] + assert 0 <= extra 
< 10 + # Make sort of Fortran, also take padding (i.e. pitch) into account + newshape = array.shape[-1], realwidth, nchannels + array2 = numpy.zeros(newshape, array.dtype) + if nchannels == 1: + array2[:, : array.shape[-2], 0] = array.T + else: + for i in range(nchannels): + array2[:, : array.shape[-2], i] = array[i, :, :].T + # copy data + data_ptr = array2.__array_interface__["data"][0] + ctypes.memmove(bits, data_ptr, array2.nbytes) + del array2 + + def _get_type_and_shape(self): + bitmap = self._bitmap + + # Get info on bitmap + with self._fi as lib: + w = lib.FreeImage_GetWidth(bitmap) + h = lib.FreeImage_GetHeight(bitmap) + self._fi_type = fi_type = lib.FreeImage_GetImageType(bitmap) + if not fi_type: + raise ValueError("Unknown image pixel type") + + # Determine required props for numpy array + bpp = None + dtype = FI_TYPES.dtypes[fi_type] + + if fi_type == FI_TYPES.FIT_BITMAP: + with self._fi as lib: + bpp = lib.FreeImage_GetBPP(bitmap) + has_pallette = lib.FreeImage_GetColorsUsed(bitmap) + if has_pallette: + # Examine the palette. 
If it is grayscale, we return as such + if has_pallette == 256: + palette = lib.FreeImage_GetPalette(bitmap) + palette = ctypes.c_void_p(palette) + p = (ctypes.c_uint8 * (256 * 4)).from_address(palette.value) + p = numpy.frombuffer(p, numpy.uint32).copy() + if (GREY_PALETTE == p).all(): + extra_dims = [] + return numpy.dtype(dtype), extra_dims + [w, h], bpp + # Convert bitmap and call this method again + newbitmap = lib.FreeImage_ConvertTo32Bits(bitmap) + newbitmap = ctypes.c_void_p(newbitmap) + self._set_bitmap(newbitmap) + return self._get_type_and_shape() + elif bpp == 8: + extra_dims = [] + elif bpp == 24: + extra_dims = [3] + elif bpp == 32: + extra_dims = [4] + else: # pragma: no cover + # raise ValueError('Cannot convert %d BPP bitmap' % bpp) + # Convert bitmap and call this method again + newbitmap = lib.FreeImage_ConvertTo32Bits(bitmap) + newbitmap = ctypes.c_void_p(newbitmap) + self._set_bitmap(newbitmap) + return self._get_type_and_shape() + else: + extra_dims = FI_TYPES.extra_dims[fi_type] + + # Return dtype and shape + return numpy.dtype(dtype), extra_dims + [w, h], bpp + + def quantize(self, quantizer=0, palettesize=256): + """Quantize the bitmap to make it 8-bit (paletted). Returns a new + FIBitmap object. + Only for 24 bit images. + """ + with self._fi as lib: + # New bitmap + bitmap = lib.FreeImage_ColorQuantizeEx( + self._bitmap, quantizer, palettesize, 0, None + ) + bitmap = ctypes.c_void_p(bitmap) + + # Check and return + if not bitmap: + raise ValueError( + 'Could not quantize bitmap "%s": %s' + % (self._filename, self._fi._get_error_message()) + ) + + new = FIBitmap(self._fi, self._filename, self._ftype, self._flags) + new._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap)) + new._fi_type = self._fi_type + return new + + +# def convert_to_32bit(self): +# """ Convert to 32bit image. 
+# """ +# with self._fi as lib: +# # New bitmap +# bitmap = lib.FreeImage_ConvertTo32Bits(self._bitmap) +# bitmap = ctypes.c_void_p(bitmap) +# +# # Check and return +# if not bitmap: +# raise ValueError('Could not convert bitmap to 32bit "%s": %s' % +# (self._filename, +# self._fi._get_error_message())) +# else: +# new = FIBitmap(self._fi, self._filename, self._ftype, +# self._flags) +# new._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap)) +# new._fi_type = self._fi_type +# return new + + +class FIMultipageBitmap(FIBaseBitmap): + """Wrapper for the multipage FI bitmap object.""" + + def load_from_filename(self, filename=None): + if filename is None: # pragma: no cover + filename = self._filename + + # Prepare + create_new = False + read_only = True + keep_cache_in_memory = False + + # Try opening + with self._fi as lib: + # Create bitmap + multibitmap = lib.FreeImage_OpenMultiBitmap( + self._ftype, + efn(filename), + create_new, + read_only, + keep_cache_in_memory, + self._flags, + ) + multibitmap = ctypes.c_void_p(multibitmap) + + # Check + if not multibitmap: # pragma: no cover + err = self._fi._get_error_message() + raise ValueError( + 'Could not open file "%s" as multi-image: %s' + % (self._filename, err) + ) + self._set_bitmap(multibitmap, (lib.FreeImage_CloseMultiBitmap, multibitmap)) + + # def load_from_bytes(self, bb): + # with self._fi as lib: + # # Create bitmap + # fimemory = lib.FreeImage_OpenMemory( + # ctypes.c_char_p(bb), len(bb)) + # multibitmap = lib.FreeImage_LoadMultiBitmapFromMemory( + # self._ftype, ctypes.c_void_p(fimemory), self._flags) + # multibitmap = ctypes.c_void_p(multibitmap) + # #lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory)) + # self._mem = fimemory + # self._bytes = bb + # # Check + # if not multibitmap: + # raise ValueError('Could not load multibitmap "%s": %s' + # % (self._filename, self._fi._get_error_message())) + # else: + # self._set_bitmap(multibitmap, + # (lib.FreeImage_CloseMultiBitmap, multibitmap)) + + def 
save_to_filename(self, filename=None): + if filename is None: # pragma: no cover + filename = self._filename + + # Prepare + create_new = True + read_only = False + keep_cache_in_memory = False + + # Open the file + # todo: Set flags at close func + with self._fi as lib: + multibitmap = lib.FreeImage_OpenMultiBitmap( + self._ftype, + efn(filename), + create_new, + read_only, + keep_cache_in_memory, + 0, + ) + multibitmap = ctypes.c_void_p(multibitmap) + + # Check + if not multibitmap: # pragma: no cover + msg = 'Could not open file "%s" for writing multi-image: %s' % ( + self._filename, + self._fi._get_error_message(), + ) + raise ValueError(msg) + self._set_bitmap(multibitmap, (lib.FreeImage_CloseMultiBitmap, multibitmap)) + + def __len__(self): + with self._fi as lib: + return lib.FreeImage_GetPageCount(self._bitmap) + + def get_page(self, index): + """Return the sub-bitmap for the given page index. + Please close the returned bitmap when done. + """ + with self._fi as lib: + # Create low-level bitmap in freeimage + bitmap = lib.FreeImage_LockPage(self._bitmap, index) + bitmap = ctypes.c_void_p(bitmap) + if not bitmap: # pragma: no cover + raise ValueError( + "Could not open sub-image %i in %r: %s" + % (index, self._filename, self._fi._get_error_message()) + ) + + # Get bitmap object to wrap this bitmap + bm = FIBitmap(self._fi, self._filename, self._ftype, self._flags) + bm._set_bitmap( + bitmap, (lib.FreeImage_UnlockPage, self._bitmap, bitmap, False) + ) + return bm + + def append_bitmap(self, bitmap): + """Add a sub-bitmap to the multi-page bitmap.""" + with self._fi as lib: + # no return value + lib.FreeImage_AppendPage(self._bitmap, bitmap._bitmap) + + +# Create instance +fi = Freeimage() diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/_swf.py b/parrot/lib/python3.10/site-packages/imageio/plugins/_swf.py new file mode 100644 index 0000000000000000000000000000000000000000..98ca3a4b0520200fd78508feeba68102db8b3f9a --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/imageio/plugins/_swf.py @@ -0,0 +1,897 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. +# This code was taken from https://github.com/almarklein/visvis/blob/master/vvmovie/images2swf.py + +# styletest: ignore E261 + +""" +Provides a function (write_swf) to store a series of numpy arrays in an +SWF movie, that can be played on a wide range of OS's. + +In desperation of wanting to share animated images, and then lacking a good +writer for animated gif or .avi, I decided to look into SWF. This format +is very well documented. + +This is a pure python module to create an SWF file that shows a series +of images. The images are stored using the DEFLATE algorithm (same as +PNG and ZIP and which is included in the standard Python distribution). +As this compression algorithm is much more effective than that used in +GIF images, we obtain better quality (24 bit colors + alpha channel) +while still producesing smaller files (a test showed ~75%). Although +SWF also allows for JPEG compression, doing so would probably require +a third party library for the JPEG encoding/decoding, we could +perhaps do this via Pillow or freeimage. + +sources and tools: + +- SWF on wikipedia +- Adobes "SWF File Format Specification" version 10 + (http://www.adobe.com/devnet/swf/pdf/swf_file_format_spec_v10.pdf) +- swftools (swfdump in specific) for debugging +- iwisoft swf2avi can be used to convert swf to avi/mpg/flv with really + good quality, while file size is reduced with factors 20-100. + A good program in my opinion. The free version has the limitation + of a watermark in the upper left corner. + +""" + +import os +import zlib +import time # noqa +import logging + +import numpy as np + + +logger = logging.getLogger(__name__) + +# todo: use Pillow to support reading JPEG images from SWF? 
+ + +# Base functions and classes + + +class BitArray: + """Dynamic array of bits that automatically resizes + with factors of two. + Append bits using .append() or += + You can reverse bits using .reverse() + """ + + def __init__(self, initvalue=None): + self.data = np.zeros((16,), dtype=np.uint8) + self._len = 0 + if initvalue is not None: + self.append(initvalue) + + def __len__(self): + return self._len # self.data.shape[0] + + def __repr__(self): + return self.data[: self._len].tobytes().decode("ascii") + + def _checkSize(self): + # check length... grow if necessary + arraylen = self.data.shape[0] + if self._len >= arraylen: + tmp = np.zeros((arraylen * 2,), dtype=np.uint8) + tmp[: self._len] = self.data[: self._len] + self.data = tmp + + def __add__(self, value): + self.append(value) + return self + + def append(self, bits): + # check input + if isinstance(bits, BitArray): + bits = str(bits) + if isinstance(bits, int): # pragma: no cover - we dont use it + bits = str(bits) + if not isinstance(bits, str): # pragma: no cover + raise ValueError("Append bits as strings or integers!") + + # add bits + for bit in bits: + self.data[self._len] = ord(bit) + self._len += 1 + self._checkSize() + + def reverse(self): + """In-place reverse.""" + tmp = self.data[: self._len].copy() + self.data[: self._len] = tmp[::-1] + + def tobytes(self): + """Convert to bytes. If necessary, + zeros are padded to the end (right side). 
+ """ + bits = str(self) + + # determine number of bytes + nbytes = 0 + while nbytes * 8 < len(bits): + nbytes += 1 + # pad + bits = bits.ljust(nbytes * 8, "0") + + # go from bits to bytes + bb = bytes() + for i in range(nbytes): + tmp = int(bits[i * 8 : (i + 1) * 8], 2) + bb += int2uint8(tmp) + + # done + return bb + + +def int2uint32(i): + return int(i).to_bytes(4, "little") + + +def int2uint16(i): + return int(i).to_bytes(2, "little") + + +def int2uint8(i): + return int(i).to_bytes(1, "little") + + +def int2bits(i, n=None): + """convert int to a string of bits (0's and 1's in a string), + pad to n elements. Convert back using int(ss,2).""" + ii = i + + # make bits + bb = BitArray() + while ii > 0: + bb += str(ii % 2) + ii = ii >> 1 + bb.reverse() + + # justify + if n is not None: + if len(bb) > n: # pragma: no cover + raise ValueError("int2bits fail: len larger than padlength.") + bb = str(bb).rjust(n, "0") + + # done + return BitArray(bb) + + +def bits2int(bb, n=8): + # Init + value = "" + + # Get value in bits + for i in range(len(bb)): + b = bb[i : i + 1] + tmp = bin(ord(b))[2:] + # value += tmp.rjust(8,'0') + value = tmp.rjust(8, "0") + value + + # Make decimal + return int(value[:n], 2) + + +def get_type_and_len(bb): + """bb should be 6 bytes at least + Return (type, length, length_of_full_tag) + """ + # Init + value = "" + + # Get first 16 bits + for i in range(2): + b = bb[i : i + 1] + tmp = bin(ord(b))[2:] + # value += tmp.rjust(8,'0') + value = tmp.rjust(8, "0") + value + + # Get type and length + type = int(value[:10], 2) + L = int(value[10:], 2) + L2 = L + 2 + + # Long tag header? 
+ if L == 63: # '111111' + value = "" + for i in range(2, 6): + b = bb[i : i + 1] # becomes a single-byte bytes() + tmp = bin(ord(b))[2:] + # value += tmp.rjust(8,'0') + value = tmp.rjust(8, "0") + value + L = int(value, 2) + L2 = L + 6 + + # Done + return type, L, L2 + + +def signedint2bits(i, n=None): + """convert signed int to a string of bits (0's and 1's in a string), + pad to n elements. Negative numbers are stored in 2's complement bit + patterns, thus positive numbers always start with a 0. + """ + + # negative number? + ii = i + if i < 0: + # A negative number, -n, is represented as the bitwise opposite of + ii = abs(ii) - 1 # the positive-zero number n-1. + + # make bits + bb = BitArray() + while ii > 0: + bb += str(ii % 2) + ii = ii >> 1 + bb.reverse() + + # justify + bb = "0" + str(bb) # always need the sign bit in front + if n is not None: + if len(bb) > n: # pragma: no cover + raise ValueError("signedint2bits fail: len larger than padlength.") + bb = bb.rjust(n, "0") + + # was it negative? (then opposite bits) + if i < 0: + bb = bb.replace("0", "x").replace("1", "0").replace("x", "1") + + # done + return BitArray(bb) + + +def twits2bits(arr): + """Given a few (signed) numbers, store them + as compactly as possible in the wat specifief by the swf format. + The numbers are multiplied by 20, assuming they + are twits. + Can be used to make the RECT record. + """ + + # first determine length using non justified bit strings + maxlen = 1 + for i in arr: + tmp = len(signedint2bits(i * 20)) + if tmp > maxlen: + maxlen = tmp + + # build array + bits = int2bits(maxlen, 5) + for i in arr: + bits += signedint2bits(i * 20, maxlen) + + return bits + + +def floats2bits(arr): + """Given a few (signed) numbers, convert them to bits, + stored as FB (float bit values). We always use 16.16. + Negative numbers are not (yet) possible, because I don't + know how the're implemented (ambiguity). + """ + bits = int2bits(31, 5) # 32 does not fit in 5 bits! 
+ for i in arr: + if i < 0: # pragma: no cover + raise ValueError("Dit not implement negative floats!") + i1 = int(i) + i2 = i - i1 + bits += int2bits(i1, 15) + bits += int2bits(i2 * 2**16, 16) + return bits + + +# Base Tag + + +class Tag: + def __init__(self): + self.bytes = bytes() + self.tagtype = -1 + + def process_tag(self): + """Implement this to create the tag.""" + raise NotImplementedError() + + def get_tag(self): + """Calls processTag and attaches the header.""" + self.process_tag() + + # tag to binary + bits = int2bits(self.tagtype, 10) + + # complete header uint16 thing + bits += "1" * 6 # = 63 = 0x3f + # make uint16 + bb = int2uint16(int(str(bits), 2)) + + # now add 32bit length descriptor + bb += int2uint32(len(self.bytes)) + + # done, attach and return + bb += self.bytes + return bb + + def make_rect_record(self, xmin, xmax, ymin, ymax): + """Simply uses makeCompactArray to produce + a RECT Record.""" + return twits2bits([xmin, xmax, ymin, ymax]) + + def make_matrix_record(self, scale_xy=None, rot_xy=None, trans_xy=None): + # empty matrix? 
+ if scale_xy is None and rot_xy is None and trans_xy is None: + return "0" * 8 + + # init + bits = BitArray() + + # scale + if scale_xy: + bits += "1" + bits += floats2bits([scale_xy[0], scale_xy[1]]) + else: + bits += "0" + + # rotation + if rot_xy: + bits += "1" + bits += floats2bits([rot_xy[0], rot_xy[1]]) + else: + bits += "0" + + # translation (no flag here) + if trans_xy: + bits += twits2bits([trans_xy[0], trans_xy[1]]) + else: + bits += twits2bits([0, 0]) + + # done + return bits + + +# Control tags + + +class ControlTag(Tag): + def __init__(self): + Tag.__init__(self) + + +class FileAttributesTag(ControlTag): + def __init__(self): + ControlTag.__init__(self) + self.tagtype = 69 + + def process_tag(self): + self.bytes = "\x00".encode("ascii") * (1 + 3) + + +class ShowFrameTag(ControlTag): + def __init__(self): + ControlTag.__init__(self) + self.tagtype = 1 + + def process_tag(self): + self.bytes = bytes() + + +class SetBackgroundTag(ControlTag): + """Set the color in 0-255, or 0-1 (if floats given).""" + + def __init__(self, *rgb): + self.tagtype = 9 + if len(rgb) == 1: + rgb = rgb[0] + self.rgb = rgb + + def process_tag(self): + bb = bytes() + for i in range(3): + clr = self.rgb[i] + if isinstance(clr, float): # pragma: no cover - not used + clr = clr * 255 + bb += int2uint8(clr) + self.bytes = bb + + +class DoActionTag(Tag): + def __init__(self, action="stop"): + Tag.__init__(self) + self.tagtype = 12 + self.actions = [action] + + def append(self, action): # pragma: no cover - not used + self.actions.append(action) + + def process_tag(self): + bb = bytes() + + for action in self.actions: + action = action.lower() + if action == "stop": + bb += "\x07".encode("ascii") + elif action == "play": # pragma: no cover - not used + bb += "\x06".encode("ascii") + else: # pragma: no cover + logger.warning("unknown action: %s" % action) + + bb += int2uint8(0) + self.bytes = bb + + +# Definition tags +class DefinitionTag(Tag): + counter = 0 # to give automatically id's 
+ + def __init__(self): + Tag.__init__(self) + DefinitionTag.counter += 1 + self.id = DefinitionTag.counter # id in dictionary + + +class BitmapTag(DefinitionTag): + def __init__(self, im): + DefinitionTag.__init__(self) + self.tagtype = 36 # DefineBitsLossless2 + + # convert image (note that format is ARGB) + # even a grayscale image is stored in ARGB, nevertheless, + # the fabilous deflate compression will make it that not much + # more data is required for storing (25% or so, and less than 10% + # when storing RGB as ARGB). + + if len(im.shape) == 3: + if im.shape[2] in [3, 4]: + tmp = np.ones((im.shape[0], im.shape[1], 4), dtype=np.uint8) * 255 + for i in range(3): + tmp[:, :, i + 1] = im[:, :, i] + if im.shape[2] == 4: + tmp[:, :, 0] = im[:, :, 3] # swap channel where alpha is + else: # pragma: no cover + raise ValueError("Invalid shape to be an image.") + + elif len(im.shape) == 2: + tmp = np.ones((im.shape[0], im.shape[1], 4), dtype=np.uint8) * 255 + for i in range(3): + tmp[:, :, i + 1] = im[:, :] + else: # pragma: no cover + raise ValueError("Invalid shape to be an image.") + + # we changed the image to uint8 4 channels. + # now compress! 
+ self._data = zlib.compress(tmp.tobytes(), zlib.DEFLATED) + self.imshape = im.shape + + def process_tag(self): + # build tag + bb = bytes() + bb += int2uint16(self.id) # CharacterID + bb += int2uint8(5) # BitmapFormat + bb += int2uint16(self.imshape[1]) # BitmapWidth + bb += int2uint16(self.imshape[0]) # BitmapHeight + bb += self._data # ZlibBitmapData + + self.bytes = bb + + +class PlaceObjectTag(ControlTag): + def __init__(self, depth, idToPlace=None, xy=(0, 0), move=False): + ControlTag.__init__(self) + self.tagtype = 26 + self.depth = depth + self.idToPlace = idToPlace + self.xy = xy + self.move = move + + def process_tag(self): + # retrieve stuff + depth = self.depth + xy = self.xy + id = self.idToPlace + + # build PlaceObject2 + bb = bytes() + if self.move: + bb += "\x07".encode("ascii") + else: + # (8 bit flags): 4:matrix, 2:character, 1:move + bb += "\x06".encode("ascii") + bb += int2uint16(depth) # Depth + bb += int2uint16(id) # character id + bb += self.make_matrix_record(trans_xy=xy).tobytes() # MATRIX record + self.bytes = bb + + +class ShapeTag(DefinitionTag): + def __init__(self, bitmapId, xy, wh): + DefinitionTag.__init__(self) + self.tagtype = 2 + self.bitmapId = bitmapId + self.xy = xy + self.wh = wh + + def process_tag(self): + """Returns a defineshape tag. 
with a bitmap fill""" + + bb = bytes() + bb += int2uint16(self.id) + xy, wh = self.xy, self.wh + tmp = self.make_rect_record(xy[0], wh[0], xy[1], wh[1]) # ShapeBounds + bb += tmp.tobytes() + + # make SHAPEWITHSTYLE structure + + # first entry: FILLSTYLEARRAY with in it a single fill style + bb += int2uint8(1) # FillStyleCount + bb += "\x41".encode("ascii") # FillStyleType (0x41 or 0x43 unsmoothed) + bb += int2uint16(self.bitmapId) # BitmapId + # bb += '\x00' # BitmapMatrix (empty matrix with leftover bits filled) + bb += self.make_matrix_record(scale_xy=(20, 20)).tobytes() + + # # first entry: FILLSTYLEARRAY with in it a single fill style + # bb += int2uint8(1) # FillStyleCount + # bb += '\x00' # solid fill + # bb += '\x00\x00\xff' # color + + # second entry: LINESTYLEARRAY with a single line style + bb += int2uint8(0) # LineStyleCount + # bb += int2uint16(0*20) # Width + # bb += '\x00\xff\x00' # Color + + # third and fourth entry: NumFillBits and NumLineBits (4 bits each) + # I each give them four bits, so 16 styles possible. + bb += "\x44".encode("ascii") + + self.bytes = bb + + # last entries: SHAPERECORDs ... (individual shape records not aligned) + # STYLECHANGERECORD + bits = BitArray() + bits += self.make_style_change_record(0, 1, moveTo=(self.wh[0], self.wh[1])) + # STRAIGHTEDGERECORD 4x + bits += self.make_straight_edge_record(-self.wh[0], 0) + bits += self.make_straight_edge_record(0, -self.wh[1]) + bits += self.make_straight_edge_record(self.wh[0], 0) + bits += self.make_straight_edge_record(0, self.wh[1]) + + # ENDSHAPRECORD + bits += self.make_end_shape_record() + + self.bytes += bits.tobytes() + + # done + # self.bytes = bb + + def make_style_change_record(self, lineStyle=None, fillStyle=None, moveTo=None): + # first 6 flags + # Note that we use FillStyle1. If we don't flash (at least 8) does not + # recognize the frames properly when importing to library. 
+ + bits = BitArray() + bits += "0" # TypeFlag (not an edge record) + bits += "0" # StateNewStyles (only for DefineShape2 and Defineshape3) + if lineStyle: + bits += "1" # StateLineStyle + else: + bits += "0" + if fillStyle: + bits += "1" # StateFillStyle1 + else: + bits += "0" + bits += "0" # StateFillStyle0 + if moveTo: + bits += "1" # StateMoveTo + else: + bits += "0" + + # give information + # todo: nbits for fillStyle and lineStyle is hard coded. + + if moveTo: + bits += twits2bits([moveTo[0], moveTo[1]]) + if fillStyle: + bits += int2bits(fillStyle, 4) + if lineStyle: + bits += int2bits(lineStyle, 4) + + return bits + + def make_straight_edge_record(self, *dxdy): + if len(dxdy) == 1: + dxdy = dxdy[0] + + # determine required number of bits + xbits = signedint2bits(dxdy[0] * 20) + ybits = signedint2bits(dxdy[1] * 20) + nbits = max([len(xbits), len(ybits)]) + + bits = BitArray() + bits += "11" # TypeFlag and StraightFlag + bits += int2bits(nbits - 2, 4) + bits += "1" # GeneralLineFlag + bits += signedint2bits(dxdy[0] * 20, nbits) + bits += signedint2bits(dxdy[1] * 20, nbits) + + # note: I do not make use of vertical/horizontal only lines... 
+ + return bits + + def make_end_shape_record(self): + bits = BitArray() + bits += "0" # TypeFlag: no edge + bits += "0" * 5 # EndOfShape + return bits + + +def read_pixels(bb, i, tagType, L1): + """With pf's seed after the recordheader, reads the pixeldata.""" + + # Get info + charId = bb[i : i + 2] # noqa + i += 2 + format = ord(bb[i : i + 1]) + i += 1 + width = bits2int(bb[i : i + 2], 16) + i += 2 + height = bits2int(bb[i : i + 2], 16) + i += 2 + + # If we can, get pixeldata and make numpy array + if format != 5: + logger.warning("Can only read 24bit or 32bit RGB(A) lossless images.") + else: + # Read byte data + offset = 2 + 1 + 2 + 2 # all the info bits + bb2 = bb[i : i + (L1 - offset)] + + # Decompress and make numpy array + data = zlib.decompress(bb2) + a = np.frombuffer(data, dtype=np.uint8) + + # Set shape + if tagType == 20: + # DefineBitsLossless - RGB data + try: + a.shape = height, width, 3 + except Exception: + # Byte align stuff might cause troubles + logger.warning("Cannot read image due to byte alignment") + if tagType == 36: + # DefineBitsLossless2 - ARGB data + a.shape = height, width, 4 + # Swap alpha channel to make RGBA + b = a + a = np.zeros_like(a) + a[:, :, 0] = b[:, :, 1] + a[:, :, 1] = b[:, :, 2] + a[:, :, 2] = b[:, :, 3] + a[:, :, 3] = b[:, :, 0] + + return a + + +# Last few functions + + +# These are the original public functions, we don't use them, but we +# keep it so that in principle this module can be used stand-alone. + + +def checkImages(images): # pragma: no cover + """checkImages(images) + Check numpy images and correct intensity range etc. + The same for all movie formats. 
+ """ + # Init results + images2 = [] + + for im in images: + if isinstance(im, np.ndarray): + # Check and convert dtype + if im.dtype == np.uint8: + images2.append(im) # Ok + elif im.dtype in [np.float32, np.float64]: + theMax = im.max() + if 128 < theMax < 300: + pass # assume 0:255 + else: + im = im.copy() + im[im < 0] = 0 + im[im > 1] = 1 + im *= 255 + images2.append(im.astype(np.uint8)) + else: + im = im.astype(np.uint8) + images2.append(im) + # Check size + if im.ndim == 2: + pass # ok + elif im.ndim == 3: + if im.shape[2] not in [3, 4]: + raise ValueError("This array can not represent an image.") + else: + raise ValueError("This array can not represent an image.") + else: + raise ValueError("Invalid image type: " + str(type(im))) + + # Done + return images2 + + +def build_file( + fp, taglist, nframes=1, framesize=(500, 500), fps=10, version=8 +): # pragma: no cover + """Give the given file (as bytes) a header.""" + + # compose header + bb = bytes() + bb += "F".encode("ascii") # uncompressed + bb += "WS".encode("ascii") # signature bytes + bb += int2uint8(version) # version + bb += "0000".encode("ascii") # FileLength (leave open for now) + bb += Tag().make_rect_record(0, framesize[0], 0, framesize[1]).tobytes() + bb += int2uint8(0) + int2uint8(fps) # FrameRate + bb += int2uint16(nframes) + fp.write(bb) + + # produce all tags + for tag in taglist: + fp.write(tag.get_tag()) + + # finish with end tag + fp.write("\x00\x00".encode("ascii")) + + # set size + sze = fp.tell() + fp.seek(4) + fp.write(int2uint32(sze)) + + +def write_swf(filename, images, duration=0.1, repeat=True): # pragma: no cover + """Write an swf-file from the specified images. If repeat is False, + the movie is finished with a stop action. Duration may also + be a list with durations for each frame (note that the duration + for each frame is always an integer amount of the minimum duration.) 
+ + Images should be a list consisting numpy arrays with values between + 0 and 255 for integer types, and between 0 and 1 for float types. + + """ + + # Check images + images2 = checkImages(images) + + # Init + taglist = [FileAttributesTag(), SetBackgroundTag(0, 0, 0)] + + # Check duration + if hasattr(duration, "__len__"): + if len(duration) == len(images2): + duration = [d for d in duration] + else: + raise ValueError("len(duration) doesn't match amount of images.") + else: + duration = [duration for im in images2] + + # Build delays list + minDuration = float(min(duration)) + delays = [round(d / minDuration) for d in duration] + delays = [max(1, int(d)) for d in delays] + + # Get FPS + fps = 1.0 / minDuration + + # Produce series of tags for each image + # t0 = time.time() + nframes = 0 + for im in images2: + bm = BitmapTag(im) + wh = (im.shape[1], im.shape[0]) + sh = ShapeTag(bm.id, (0, 0), wh) + po = PlaceObjectTag(1, sh.id, move=nframes > 0) + taglist.extend([bm, sh, po]) + for i in range(delays[nframes]): + taglist.append(ShowFrameTag()) + nframes += 1 + + if not repeat: + taglist.append(DoActionTag("stop")) + + # Build file + # t1 = time.time() + fp = open(filename, "wb") + try: + build_file(fp, taglist, nframes=nframes, framesize=wh, fps=fps) + except Exception: + raise + finally: + fp.close() + # t2 = time.time() + + # logger.warning("Writing SWF took %1.2f and %1.2f seconds" % (t1-t0, t2-t1) ) + + +def read_swf(filename): # pragma: no cover + """Read all images from an SWF (shockwave flash) file. Returns a list + of numpy arrays. + + Limitation: only read the PNG encoded images (not the JPG encoded ones). 
+ """ + + # Check whether it exists + if not os.path.isfile(filename): + raise IOError("File not found: " + str(filename)) + + # Init images + images = [] + + # Open file and read all + fp = open(filename, "rb") + bb = fp.read() + + try: + # Check opening tag + tmp = bb[0:3].decode("ascii", "ignore") + if tmp.upper() == "FWS": + pass # ok + elif tmp.upper() == "CWS": + # Decompress movie + bb = bb[:8] + zlib.decompress(bb[8:]) + else: + raise IOError("Not a valid SWF file: " + str(filename)) + + # Set filepointer at first tag (skipping framesize RECT and two uin16's + i = 8 + nbits = bits2int(bb[i : i + 1], 5) # skip FrameSize + nbits = 5 + nbits * 4 + Lrect = nbits / 8.0 + if Lrect % 1: + Lrect += 1 + Lrect = int(Lrect) + i += Lrect + 4 + + # Iterate over the tags + counter = 0 + while True: + counter += 1 + + # Get tag header + head = bb[i : i + 6] + if not head: + break # Done (we missed end tag) + + # Determine type and length + T, L1, L2 = get_type_and_len(head) + if not L2: + logger.warning("Invalid tag length, could not proceed") + break + # logger.warning(T, L2) + + # Read image if we can + if T in [20, 36]: + im = read_pixels(bb, i + 6, T, L1) + if im is not None: + images.append(im) + elif T in [6, 21, 35, 90]: + logger.warning("Ignoring JPEG image: cannot read JPEG.") + else: + pass # Not an image tag + + # Detect end tag + if T == 0: + break + + # Next tag! + i += L2 + + finally: + fp.close() + + # Done + return images + + +# Backward compatibility; same public names as when this was images2swf. +writeSwf = write_swf +readSwf = read_swf diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/_tifffile.py b/parrot/lib/python3.10/site-packages/imageio/plugins/_tifffile.py new file mode 100644 index 0000000000000000000000000000000000000000..bcdf728d8f214db6c7080f8951d73645a7bf7227 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/_tifffile.py @@ -0,0 +1,10675 @@ +#! 
/usr/bin/env python3 +# -*- coding: utf-8 -*- +# tifffile.py + +# Copyright (c) 2008-2018, Christoph Gohlke +# Copyright (c) 2008-2018, The Regents of the University of California +# Produced at the Laboratory for Fluorescence Dynamics +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holders nor the names of any +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +"""Read image and meta data from (bio) TIFF(R) files. Save numpy arrays as TIFF. + +Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH, +SGI, ImageJ, MicroManager, FluoView, ScanImage, SEQ, GEL, and GeoTIFF files. 
+ +Tifffile is not a general-purpose TIFF library. +Only a subset of the TIFF specification is supported, mainly uncompressed and +losslessly compressed 1, 8, 16, 32 and 64 bit integer, 16, 32 and 64-bit float, +grayscale and RGB(A) images, which are commonly used in scientific imaging. +Specifically, reading slices of image data, image trees defined via SubIFDs, +CCITT and OJPEG compression, chroma subsampling without JPEG compression, +or IPTC and XMP metadata are not implemented. + +TIFF(R), the tagged Image File Format, is a trademark and under control of +Adobe Systems Incorporated. BigTIFF allows for files greater than 4 GB. +STK, LSM, FluoView, SGI, SEQ, GEL, and OME-TIFF, are custom extensions +defined by Molecular Devices (Universal Imaging Corporation), Carl Zeiss +MicroImaging, Olympus, Silicon Graphics International, Media Cybernetics, +Molecular Dynamics, and the Open Microscopy Environment consortium +respectively. + +For command line usage run C{python -m tifffile --help} + +:Author: + `Christoph Gohlke `_ + +:Organization: + Laboratory for Fluorescence Dynamics, University of California, Irvine + +:Version: 2018.06.15 + +Requirements +------------ +* `CPython 3.6 64-bit `_ +* `Numpy 1.14 `_ +* `Matplotlib 2.2 `_ (optional for plotting) +* `Tifffile.c 2018.02.10 `_ + (recommended for faster decoding of PackBits and LZW encoded strings) +* `Tifffile_geodb.py 2018.02.10 `_ + (optional enums for GeoTIFF metadata) +* Python 2 requires 'futures', 'enum34', 'pathlib'. + +Revisions +--------- +2018.06.15 + Pass 2680 tests. + Towards reading JPEG and other compressions via imagecodecs package (WIP). + Add function to validate TIFF using 'jhove -m TIFF-hul'. + Save bool arrays as bilevel TIFF. + Accept pathlib.Path as filenames. + Move 'software' argument from TiffWriter __init__ to save. + Raise DOS limit to 16 TB. + Lazy load lzma and zstd compressors and decompressors. + Add option to save IJMetadata tags. 
+ Return correct number of pages for truncated series (bug fix). + Move EXIF tags to TIFF.TAG as per TIFF/EP standard. +2018.02.18 + Pass 2293 tests. + Always save RowsPerStrip and Resolution tags as required by TIFF standard. + Do not use badly typed ImageDescription. + Coherce bad ASCII string tags to bytes. + Tuning of __str__ functions. + Fix reading 'undefined' tag values (bug fix). + Read and write ZSTD compressed data. + Use hexdump to print byte strings. + Determine TIFF byte order from data dtype in imsave. + Add option to specify RowsPerStrip for compressed strips. + Allow memory map of arrays with non-native byte order. + Attempt to handle ScanImage <= 5.1 files. + Restore TiffPageSeries.pages sequence interface. + Use numpy.frombuffer instead of fromstring to read from binary data. + Parse GeoTIFF metadata. + Add option to apply horizontal differencing before compression. + Towards reading PerkinElmer QPTIFF (no test files). + Do not index out of bounds data in tifffile.c unpackbits and decodelzw. +2017.09.29 (tentative) + Many backwards incompatible changes improving speed and resource usage: + Pass 2268 tests. + Add detail argument to __str__ function. Remove info functions. + Fix potential issue correcting offsets of large LSM files with positions. + Remove TiffFile sequence interface; use TiffFile.pages instead. + Do not make tag values available as TiffPage attributes. + Use str (not bytes) type for tag and metadata strings (WIP). + Use documented standard tag and value names (WIP). + Use enums for some documented TIFF tag values. + Remove 'memmap' and 'tmpfile' options; use out='memmap' instead. + Add option to specify output in asarray functions. + Add option to concurrently decode image strips or tiles using threads. + Add TiffPage.asrgb function (WIP). + Do not apply colormap in asarray. + Remove 'colormapped', 'rgbonly', and 'scale_mdgel' options from asarray. + Consolidate metadata in TiffFile _metadata functions. 
+ Remove non-tag metadata properties from TiffPage. + Add function to convert LSM to tiled BIN files. + Align image data in file. + Make TiffPage.dtype a numpy.dtype. + Add 'ndim' and 'size' properties to TiffPage and TiffPageSeries. + Allow imsave to write non-BigTIFF files up to ~4 GB. + Only read one page for shaped series if possible. + Add memmap function to create memory-mapped array stored in TIFF file. + Add option to save empty arrays to TIFF files. + Add option to save truncated TIFF files. + Allow single tile images to be saved contiguously. + Add optional movie mode for files with uniform pages. + Lazy load pages. + Use lightweight TiffFrame for IFDs sharing properties with key TiffPage. + Move module constants to 'TIFF' namespace (speed up module import). + Remove 'fastij' option from TiffFile. + Remove 'pages' parameter from TiffFile. + Remove TIFFfile alias. + Deprecate Python 2. + Require enum34 and futures packages on Python 2.7. + Remove Record class and return all metadata as dict instead. + Add functions to parse STK, MetaSeries, ScanImage, SVS, Pilatus metadata. + Read tags from EXIF and GPS IFDs. + Use pformat for tag and metadata values. + Fix reading some UIC tags (bug fix). + Do not modify input array in imshow (bug fix). + Fix Python implementation of unpack_ints. +2017.05.23 + Pass 1961 tests. + Write correct number of SampleFormat values (bug fix). + Use Adobe deflate code to write ZIP compressed files. + Add option to pass tag values as packed binary data for writing. + Defer tag validation to attribute access. + Use property instead of lazyattr decorator for simple expressions. +2017.03.17 + Write IFDs and tag values on word boundaries. + Read ScanImage metadata. + Remove is_rgb and is_indexed attributes from TiffFile. + Create files used by doctests. +2017.01.12 + Read Zeiss SEM metadata. + Read OME-TIFF with invalid references to external files. + Rewrite C LZW decoder (5x faster). 
+ Read corrupted LSM files missing EOI code in LZW stream. +2017.01.01 + Add option to append images to existing TIFF files. + Read files without pages. + Read S-FEG and Helios NanoLab tags created by FEI software. + Allow saving Color Filter Array (CFA) images. + Add info functions returning more information about TiffFile and TiffPage. + Add option to read specific pages only. + Remove maxpages argument (backwards incompatible). + Remove test_tifffile function. +2016.10.28 + Pass 1944 tests. + Improve detection of ImageJ hyperstacks. + Read TVIPS metadata created by EM-MENU (by Marco Oster). + Add option to disable using OME-XML metadata. + Allow non-integer range attributes in modulo tags (by Stuart Berg). +2016.06.21 + Do not always memmap contiguous data in page series. +2016.05.13 + Add option to specify resolution unit. + Write grayscale images with extra samples when planarconfig is specified. + Do not write RGB color images with 2 samples. + Reorder TiffWriter.save keyword arguments (backwards incompatible). +2016.04.18 + Pass 1932 tests. + TiffWriter, imread, and imsave accept open binary file streams. +2016.04.13 + Correctly handle reversed fill order in 2 and 4 bps images (bug fix). + Implement reverse_bitorder in C. +2016.03.18 + Fix saving additional ImageJ metadata. +2016.02.22 + Pass 1920 tests. + Write 8 bytes double tag values using offset if necessary (bug fix). + Add option to disable writing second image description tag. + Detect tags with incorrect counts. + Disable color mapping for LSM. +2015.11.13 + Read LSM 6 mosaics. + Add option to specify directory of memory-mapped files. + Add command line options to specify vmin and vmax values for colormapping. +2015.10.06 + New helper function to apply colormaps. + Renamed is_palette attributes to is_indexed (backwards incompatible). + Color-mapped samples are now contiguous (backwards incompatible). + Do not color-map ImageJ hyperstacks (backwards incompatible). + Towards reading Leica SCN. 
+2015.09.25 + Read images with reversed bit order (FillOrder is LSB2MSB). +2015.09.21 + Read RGB OME-TIFF. + Warn about malformed OME-XML. +2015.09.16 + Detect some corrupted ImageJ metadata. + Better axes labels for 'shaped' files. + Do not create TiffTag for default values. + Chroma subsampling is not supported. + Memory-map data in TiffPageSeries if possible (optional). +2015.08.17 + Pass 1906 tests. + Write ImageJ hyperstacks (optional). + Read and write LZMA compressed data. + Specify datetime when saving (optional). + Save tiled and color-mapped images (optional). + Ignore void bytecounts and offsets if possible. + Ignore bogus image_depth tag created by ISS Vista software. + Decode floating point horizontal differencing (not tiled). + Save image data contiguously if possible. + Only read first IFD from ImageJ files if possible. + Read ImageJ 'raw' format (files larger than 4 GB). + TiffPageSeries class for pages with compatible shape and data type. + Try to read incomplete tiles. + Open file dialog if no filename is passed on command line. + Ignore errors when decoding OME-XML. + Rename decoder functions (backwards incompatible). +2014.08.24 + TiffWriter class for incremental writing images. + Simplify examples. +2014.08.19 + Add memmap function to FileHandle. + Add function to determine if image data in TiffPage is memory-mappable. + Do not close files if multifile_close parameter is False. +2014.08.10 + Pass 1730 tests. + Return all extrasamples by default (backwards incompatible). + Read data from series of pages into memory-mapped array (optional). + Squeeze OME dimensions (backwards incompatible). + Workaround missing EOI code in strips. + Support image and tile depth tags (SGI extension). + Better handling of STK/UIC tags (backwards incompatible). + Disable color mapping for STK. + Julian to datetime converter. + TIFF ASCII type may be NULL separated. + Unwrap strip offsets for LSM files greater than 4 GB. 
+ Correct strip byte counts in compressed LSM files. + Skip missing files in OME series. + Read embedded TIFF files. +2014.02.05 + Save rational numbers as type 5 (bug fix). +2013.12.20 + Keep other files in OME multi-file series closed. + FileHandle class to abstract binary file handle. + Disable color mapping for bad OME-TIFF produced by bio-formats. + Read bad OME-XML produced by ImageJ when cropping. +2013.11.03 + Allow zlib compress data in imsave function (optional). + Memory-map contiguous image data (optional). +2013.10.28 + Read MicroManager metadata and little-endian ImageJ tag. + Save extra tags in imsave function. + Save tags in ascending order by code (bug fix). +2012.10.18 + Accept file like objects (read from OIB files). +2012.08.21 + Rename TIFFfile to TiffFile and TIFFpage to TiffPage. + TiffSequence class for reading sequence of TIFF files. + Read UltraQuant tags. + Allow float numbers as resolution in imsave function. +2012.08.03 + Read MD GEL tags and NIH Image header. +2012.07.25 + Read ImageJ tags. + ... + +Notes +----- +The API is not stable yet and might change between revisions. + +Tested on little-endian platforms only. + +Other Python packages and modules for reading (bio) scientific TIFF files: + +* `python-bioformats `_ +* `Imread `_ +* `PyLibTiff `_ +* `ITK `_ +* `PyLSM `_ +* `PyMca.TiffIO.py `_ (same as fabio.TiffIO) +* `BioImageXD.Readers `_ +* `Cellcognition.io `_ +* `pymimage `_ +* `pytiff `_ + +Acknowledgements +---------------- +* Egor Zindy, University of Manchester, for lsm_scan_info specifics. +* Wim Lewis for a bug fix and some LSM functions. +* Hadrien Mary for help on reading MicroManager files. +* Christian Kliche for help writing tiled and color-mapped files. + +References +---------- +1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated. + http://partners.adobe.com/public/developer/tiff/ +2) TIFF File Format FAQ. 
http://www.awaresystems.be/imaging/tiff/faq.html +3) MetaMorph Stack (STK) Image File Format. + http://support.meta.moleculardevices.com/docs/t10243.pdf +4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010). + Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011 +5) The OME-TIFF format. + http://www.openmicroscopy.org/site/support/file-formats/ome-tiff +6) UltraQuant(r) Version 6.0 for Windows Start-Up Guide. + http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf +7) Micro-Manager File Formats. + http://www.micro-manager.org/wiki/Micro-Manager_File_Formats +8) Tags for TIFF and Related Specifications. Digital Preservation. + http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml +9) ScanImage BigTiff Specification - ScanImage 2016. + http://scanimage.vidriotechnologies.com/display/SI2016/ + ScanImage+BigTiff+Specification +10) CIPA DC-008-2016: Exchangeable image file format for digital still cameras: + Exif Version 2.31. + http://www.cipa.jp/std/documents/e/DC-008-Translation-2016-E.pdf + +Examples +-------- +>>> # write numpy array to TIFF file +>>> data = numpy.random.rand(4, 301, 219) +>>> imsave('temp.tif', data, photometric='minisblack') + +>>> # read numpy array from TIFF file +>>> image = imread('temp.tif') +>>> numpy.testing.assert_array_equal(image, data) + +>>> # iterate over pages and tags in TIFF file +>>> with TiffFile('temp.tif') as tif: +... images = tif.asarray() +... for page in tif.pages: +... for tag in page.tags.values(): +... _ = tag.name, tag.value +... 
image = page.asarray() + +""" + +from __future__ import division, print_function + +import sys +import os +import io +import re +import glob +import math +import zlib +import time +import json +import enum +import struct +import pathlib +import warnings +import binascii +import tempfile +import datetime +import threading +import collections +import multiprocessing +import concurrent.futures + +import numpy + +# delay imports: mmap, pprint, fractions, xml, tkinter, matplotlib, lzma, zstd, +# subprocess + +__version__ = "2018.06.15" +__docformat__ = "restructuredtext en" +__all__ = ( + "imsave", + "imread", + "imshow", + "memmap", + "TiffFile", + "TiffWriter", + "TiffSequence", + # utility functions used by oiffile or czifile + "FileHandle", + "lazyattr", + "natural_sorted", + "decode_lzw", + "stripnull", + "create_output", + "repeat_nd", + "format_size", + "product", + "xml2dict", +) + + +def imread(files, **kwargs): + """Return image data from TIFF file(s) as numpy array. + + Refer to the TiffFile class and member functions for documentation. + + Parameters + ---------- + files : str, binary stream, or sequence + File name, seekable binary stream, glob pattern, or sequence of + file names. + kwargs : dict + Parameters 'multifile' and 'is_ome' are passed to the TiffFile class. + The 'pattern' parameter is passed to the TiffSequence class. + Other parameters are passed to the asarray functions. + The first image series is returned if no arguments are provided. 
+ + Examples + -------- + >>> # get image from first page + >>> imsave('temp.tif', numpy.random.rand(3, 4, 301, 219)) + >>> im = imread('temp.tif', key=0) + >>> im.shape + (4, 301, 219) + + >>> # get images from sequence of files + >>> ims = imread(['temp.tif', 'temp.tif']) + >>> ims.shape + (2, 3, 4, 301, 219) + + """ + kwargs_file = parse_kwargs(kwargs, "multifile", "is_ome") + kwargs_seq = parse_kwargs(kwargs, "pattern") + + if isinstance(files, basestring) and any(i in files for i in "?*"): + files = glob.glob(files) + if not files: + raise ValueError("no files found") + if not hasattr(files, "seek") and len(files) == 1: + files = files[0] + + if isinstance(files, basestring) or hasattr(files, "seek"): + with TiffFile(files, **kwargs_file) as tif: + return tif.asarray(**kwargs) + else: + with TiffSequence(files, **kwargs_seq) as imseq: + return imseq.asarray(**kwargs) + + +def imsave(file, data=None, shape=None, dtype=None, bigsize=2**32 - 2**25, **kwargs): + """Write numpy array to TIFF file. + + Refer to the TiffWriter class and member functions for documentation. + + Parameters + ---------- + file : str or binary stream + File name or writable binary stream, such as an open file or BytesIO. + data : array_like + Input image. The last dimensions are assumed to be image depth, + height, width, and samples. + If None, an empty array of the specified shape and dtype is + saved to file. + Unless 'byteorder' is specified in 'kwargs', the TIFF file byte order + is determined from the data's dtype or the dtype argument. + shape : tuple + If 'data' is None, shape of an empty array to save to the file. + dtype : numpy.dtype + If 'data' is None, data-type of an empty array to save to the file. + bigsize : int + Create a BigTIFF file if the size of data in bytes is larger than + this threshold and 'imagej' or 'truncate' are not enabled. + By default, the threshold is 4 GB minus 32 MB reserved for metadata. 
+ Use the 'bigtiff' parameter to explicitly specify the type of + file created. + kwargs : dict + Parameters 'append', 'byteorder', 'bigtiff', and 'imagej', are passed + to TiffWriter(). Other parameters are passed to TiffWriter.save(). + + Returns + ------- + If the image data are written contiguously, return offset and bytecount + of image data in the file. + + Examples + -------- + >>> # save a RGB image + >>> data = numpy.random.randint(0, 255, (256, 256, 3), 'uint8') + >>> imsave('temp.tif', data, photometric='rgb') + + >>> # save a random array and metadata, using compression + >>> data = numpy.random.rand(2, 5, 3, 301, 219) + >>> imsave('temp.tif', data, compress=6, metadata={'axes': 'TZCYX'}) + + """ + tifargs = parse_kwargs(kwargs, "append", "bigtiff", "byteorder", "imagej") + if data is None: + size = product(shape) * numpy.dtype(dtype).itemsize + byteorder = numpy.dtype(dtype).byteorder + else: + try: + size = data.nbytes + byteorder = data.dtype.byteorder + except Exception: + size = 0 + byteorder = None + if ( + size > bigsize + and "bigtiff" not in tifargs + and not (tifargs.get("imagej", False) or tifargs.get("truncate", False)) + ): + tifargs["bigtiff"] = True + if "byteorder" not in tifargs: + tifargs["byteorder"] = byteorder + + with TiffWriter(file, **tifargs) as tif: + return tif.save(data, shape, dtype, **kwargs) + + +def memmap(filename, shape=None, dtype=None, page=None, series=0, mode="r+", **kwargs): + """Return memory-mapped numpy array stored in TIFF file. + + Memory-mapping requires data stored in native byte order, without tiling, + compression, predictors, etc. + If 'shape' and 'dtype' are provided, existing files will be overwritten or + appended to depending on the 'append' parameter. + Otherwise the image data of a specified page or series in an existing + file will be memory-mapped. By default, the image data of the first page + series is memory-mapped. + Call flush() to write any changes in the array to the file. 
+ Raise ValueError if the image data in the file is not memory-mappable. + + Parameters + ---------- + filename : str + Name of the TIFF file which stores the array. + shape : tuple + Shape of the empty array. + dtype : numpy.dtype + Data-type of the empty array. + page : int + Index of the page which image data to memory-map. + series : int + Index of the page series which image data to memory-map. + mode : {'r+', 'r', 'c'}, optional + The file open mode. Default is to open existing file for reading and + writing ('r+'). + kwargs : dict + Additional parameters passed to imsave() or TiffFile(). + + Examples + -------- + >>> # create an empty TIFF file and write to memory-mapped image + >>> im = memmap('temp.tif', shape=(256, 256), dtype='float32') + >>> im[255, 255] = 1.0 + >>> im.flush() + >>> im.shape, im.dtype + ((256, 256), dtype('float32')) + >>> del im + + >>> # memory-map image data in a TIFF file + >>> im = memmap('temp.tif', page=0) + >>> im[255, 255] + 1.0 + + """ + if shape is not None and dtype is not None: + # create a new, empty array + kwargs.update( + data=None, + shape=shape, + dtype=dtype, + returnoffset=True, + align=TIFF.ALLOCATIONGRANULARITY, + ) + result = imsave(filename, **kwargs) + if result is None: + # TODO: fail before creating file or writing data + raise ValueError("image data are not memory-mappable") + offset = result[0] + else: + # use existing file + with TiffFile(filename, **kwargs) as tif: + if page is not None: + page = tif.pages[page] + if not page.is_memmappable: + raise ValueError("image data are not memory-mappable") + offset, _ = page.is_contiguous + shape = page.shape + dtype = page.dtype + else: + series = tif.series[series] + if series.offset is None: + raise ValueError("image data are not memory-mappable") + shape = series.shape + dtype = series.dtype + offset = series.offset + dtype = tif.byteorder + dtype.char + return numpy.memmap(filename, dtype, mode, offset, shape, "C") + + +class lazyattr(object): + """Attribute 
whose value is computed on first access.""" + + # TODO: help() doesn't work + __slots__ = ("func",) + + def __init__(self, func): + self.func = func + # self.__name__ = func.__name__ + # self.__doc__ = func.__doc__ + # self.lock = threading.RLock() + + def __get__(self, instance, owner): + # with self.lock: + if instance is None: + return self + try: + value = self.func(instance) + except AttributeError as e: + raise RuntimeError(e) + if value is NotImplemented: + return getattr(super(owner, instance), self.func.__name__) + setattr(instance, self.func.__name__, value) + return value + + +class TiffWriter(object): + """Write numpy arrays to TIFF file. + + TiffWriter instances must be closed using the 'close' method, which is + automatically called when using the 'with' context manager. + + TiffWriter's main purpose is saving nD numpy array's as TIFF, + not to create any possible TIFF format. Specifically, JPEG compression, + SubIFDs, ExifIFD, or GPSIFD tags are not supported. + + Examples + -------- + >>> # successively append images to BigTIFF file + >>> data = numpy.random.rand(2, 5, 3, 301, 219) + >>> with TiffWriter('temp.tif', bigtiff=True) as tif: + ... for i in range(data.shape[0]): + ... tif.save(data[i], compress=6, photometric='minisblack') + + """ + + def __init__(self, file, bigtiff=False, byteorder=None, append=False, imagej=False): + """Open a TIFF file for writing. + + An empty TIFF file is created if the file does not exist, else the + file is overwritten with an empty TIFF file unless 'append' + is true. Use bigtiff=True when creating files larger than 4 GB. + + Parameters + ---------- + file : str, binary stream, or FileHandle + File name or writable binary stream, such as an open file + or BytesIO. + bigtiff : bool + If True, the BigTIFF format is used. + byteorder : {'<', '>', '=', '|'} + The endianness of the data in the file. + By default, this is the system's native byte order. 
+ append : bool + If True and 'file' is an existing standard TIFF file, image data + and tags are appended to the file. + Appending data may corrupt specifically formatted TIFF files + such as LSM, STK, ImageJ, NIH, or FluoView. + imagej : bool + If True, write an ImageJ hyperstack compatible file. + This format can handle data types uint8, uint16, or float32 and + data shapes up to 6 dimensions in TZCYXS order. + RGB images (S=3 or S=4) must be uint8. + ImageJ's default byte order is big-endian but this implementation + uses the system's native byte order by default. + ImageJ does not support BigTIFF format or LZMA compression. + The ImageJ file format is undocumented. + + """ + if append: + # determine if file is an existing TIFF file that can be extended + try: + with FileHandle(file, mode="rb", size=0) as fh: + pos = fh.tell() + try: + with TiffFile(fh) as tif: + if append != "force" and any( + getattr(tif, "is_" + a) + for a in ( + "lsm", + "stk", + "imagej", + "nih", + "fluoview", + "micromanager", + ) + ): + raise ValueError("file contains metadata") + byteorder = tif.byteorder + bigtiff = tif.is_bigtiff + self._ifdoffset = tif.pages.next_page_offset + except Exception as e: + raise ValueError("cannot append to file: %s" % str(e)) + finally: + fh.seek(pos) + except (IOError, FileNotFoundError): + append = False + + if byteorder in (None, "=", "|"): + byteorder = "<" if sys.byteorder == "little" else ">" + elif byteorder not in ("<", ">"): + raise ValueError("invalid byteorder %s" % byteorder) + if imagej and bigtiff: + warnings.warn("writing incompatible BigTIFF ImageJ") + + self._byteorder = byteorder + self._imagej = bool(imagej) + self._truncate = False + self._metadata = None + self._colormap = None + + self._descriptionoffset = 0 + self._descriptionlen = 0 + self._descriptionlenoffset = 0 + self._tags = None + self._shape = None # normalized shape of data in consecutive pages + self._datashape = None # shape of data in consecutive pages + 
self._datadtype = None # data type + self._dataoffset = None # offset to data + self._databytecounts = None # byte counts per plane + self._tagoffsets = None # strip or tile offset tag code + + if bigtiff: + self._bigtiff = True + self._offsetsize = 8 + self._tagsize = 20 + self._tagnoformat = "Q" + self._offsetformat = "Q" + self._valueformat = "8s" + else: + self._bigtiff = False + self._offsetsize = 4 + self._tagsize = 12 + self._tagnoformat = "H" + self._offsetformat = "I" + self._valueformat = "4s" + + if append: + self._fh = FileHandle(file, mode="r+b", size=0) + self._fh.seek(0, 2) + else: + self._fh = FileHandle(file, mode="wb", size=0) + self._fh.write({"<": b"II", ">": b"MM"}[byteorder]) + if bigtiff: + self._fh.write(struct.pack(byteorder + "HHH", 43, 8, 0)) + else: + self._fh.write(struct.pack(byteorder + "H", 42)) + # first IFD + self._ifdoffset = self._fh.tell() + self._fh.write(struct.pack(byteorder + self._offsetformat, 0)) + + def save( + self, + data=None, + shape=None, + dtype=None, + returnoffset=False, + photometric=None, + planarconfig=None, + tile=None, + contiguous=True, + align=16, + truncate=False, + compress=0, + rowsperstrip=None, + predictor=False, + colormap=None, + description=None, + datetime=None, + resolution=None, + software="tifffile.py", + metadata={}, + ijmetadata=None, + extratags=(), + ): + """Write numpy array and tags to TIFF file. + + The data shape's last dimensions are assumed to be image depth, + height (length), width, and samples. + If a colormap is provided, the data's dtype must be uint8 or uint16 + and the data values are indices into the last dimension of the + colormap. + If 'shape' and 'dtype' are specified, an empty array is saved. + This option cannot be used with compression or multiple tiles. + Image data are written uncompressed in one strip per plane by default. + Dimensions larger than 2 to 4 (depending on photometric mode, planar + configuration, and SGI mode) are flattened and saved as separate pages. 
+ The SampleFormat and BitsPerSample tags are derived from the data type. + + Parameters + ---------- + data : numpy.ndarray or None + Input image array. + shape : tuple or None + Shape of the empty array to save. Used only if 'data' is None. + dtype : numpy.dtype or None + Data-type of the empty array to save. Used only if 'data' is None. + returnoffset : bool + If True and the image data in the file is memory-mappable, return + the offset and number of bytes of the image data in the file. + photometric : {'MINISBLACK', 'MINISWHITE', 'RGB', 'PALETTE', 'CFA'} + The color space of the image data. + By default, this setting is inferred from the data shape and the + value of colormap. + For CFA images, DNG tags must be specified in 'extratags'. + planarconfig : {'CONTIG', 'SEPARATE'} + Specifies if samples are stored contiguous or in separate planes. + By default, this setting is inferred from the data shape. + If this parameter is set, extra samples are used to store grayscale + images. + 'CONTIG': last dimension contains samples. + 'SEPARATE': third last dimension contains samples. + tile : tuple of int + The shape (depth, length, width) of image tiles to write. + If None (default), image data are written in strips. + The tile length and width must be a multiple of 16. + If the tile depth is provided, the SGI ImageDepth and TileDepth + tags are used to save volume data. + Unless a single tile is used, tiles cannot be used to write + contiguous files. + Few software can read the SGI format, e.g. MeVisLab. + contiguous : bool + If True (default) and the data and parameters are compatible with + previous ones, if any, the image data are stored contiguously after + the previous one. Parameters 'photometric' and 'planarconfig' + are ignored. Parameters 'description', datetime', and 'extratags' + are written to the first page of a contiguous series only. + align : int + Byte boundary on which to align the image data in the file. + Default 16. 
Use mmap.ALLOCATIONGRANULARITY for memory-mapped data. + Following contiguous writes are not aligned. + truncate : bool + If True, only write the first page including shape metadata if + possible (uncompressed, contiguous, not tiled). + Other TIFF readers will only be able to read part of the data. + compress : int or 'LZMA', 'ZSTD' + Values from 0 to 9 controlling the level of zlib compression. + If 0 (default), data are written uncompressed. + Compression cannot be used to write contiguous files. + If 'LZMA' or 'ZSTD', LZMA or ZSTD compression is used, which is + not available on all platforms. + rowsperstrip : int + The number of rows per strip used for compression. + Uncompressed data are written in one strip per plane. + predictor : bool + If True, apply horizontal differencing to integer type images + before compression. + colormap : numpy.ndarray + RGB color values for the corresponding data value. + Must be of shape (3, 2**(data.itemsize*8)) and dtype uint16. + description : str + The subject of the image. Must be 7-bit ASCII. Cannot be used with + the ImageJ format. Saved with the first page only. + datetime : datetime + Date and time of image creation in '%Y:%m:%d %H:%M:%S' format. + If None (default), the current date and time is used. + Saved with the first page only. + resolution : (float, float[, str]) or ((int, int), (int, int)[, str]) + X and Y resolutions in pixels per resolution unit as float or + rational numbers. A third, optional parameter specifies the + resolution unit, which must be None (default for ImageJ), + 'INCH' (default), or 'CENTIMETER'. + software : str + Name of the software used to create the file. Must be 7-bit ASCII. + Saved with the first page only. + metadata : dict + Additional meta data to be saved along with shape information + in JSON or ImageJ formats in an ImageDescription tag. + If None, do not write a second ImageDescription tag. + Strings must be 7-bit ASCII. Saved with the first page only. 
+ ijmetadata : dict + Additional meta data to be saved in application specific + IJMetadata and IJMetadataByteCounts tags. Refer to the + imagej_metadata_tags function for valid keys and values. + Saved with the first page only. + extratags : sequence of tuples + Additional tags as [(code, dtype, count, value, writeonce)]. + + code : int + The TIFF tag Id. + dtype : str + Data type of items in 'value' in Python struct format. + One of B, s, H, I, 2I, b, h, i, 2i, f, d, Q, or q. + count : int + Number of data values. Not used for string or byte string + values. + value : sequence + 'Count' values compatible with 'dtype'. + Byte strings must contain count values of dtype packed as + binary data. + writeonce : bool + If True, the tag is written to the first page only. + + """ + # TODO: refactor this function + fh = self._fh + byteorder = self._byteorder + + if data is None: + if compress: + raise ValueError("cannot save compressed empty file") + datashape = shape + datadtype = numpy.dtype(dtype).newbyteorder(byteorder) + datadtypechar = datadtype.char + else: + data = numpy.asarray(data, byteorder + data.dtype.char, "C") + if data.size == 0: + raise ValueError("cannot save empty array") + datashape = data.shape + datadtype = data.dtype + datadtypechar = data.dtype.char + + returnoffset = returnoffset and datadtype.isnative + bilevel = datadtypechar == "?" 
+ if bilevel: + index = -1 if datashape[-1] > 1 else -2 + datasize = product(datashape[:index]) + if datashape[index] % 8: + datasize *= datashape[index] // 8 + 1 + else: + datasize *= datashape[index] // 8 + else: + datasize = product(datashape) * datadtype.itemsize + + # just append contiguous data if possible + self._truncate = bool(truncate) + if self._datashape: + if ( + not contiguous + or self._datashape[1:] != datashape + or self._datadtype != datadtype + or (compress and self._tags) + or tile + or not numpy.array_equal(colormap, self._colormap) + ): + # incompatible shape, dtype, compression mode, or colormap + self._write_remaining_pages() + self._write_image_description() + self._truncate = False + self._descriptionoffset = 0 + self._descriptionlenoffset = 0 + self._datashape = None + self._colormap = None + if self._imagej: + raise ValueError("ImageJ does not support non-contiguous data") + else: + # consecutive mode + self._datashape = (self._datashape[0] + 1,) + datashape + if not compress: + # write contiguous data, write IFDs/tags later + offset = fh.tell() + if data is None: + fh.write_empty(datasize) + else: + fh.write_array(data) + if returnoffset: + return offset, datasize + return + + input_shape = datashape + tagnoformat = self._tagnoformat + valueformat = self._valueformat + offsetformat = self._offsetformat + offsetsize = self._offsetsize + tagsize = self._tagsize + + MINISBLACK = TIFF.PHOTOMETRIC.MINISBLACK + RGB = TIFF.PHOTOMETRIC.RGB + CFA = TIFF.PHOTOMETRIC.CFA + PALETTE = TIFF.PHOTOMETRIC.PALETTE + CONTIG = TIFF.PLANARCONFIG.CONTIG + SEPARATE = TIFF.PLANARCONFIG.SEPARATE + + # parse input + if photometric is not None: + photometric = enumarg(TIFF.PHOTOMETRIC, photometric) + if planarconfig: + planarconfig = enumarg(TIFF.PLANARCONFIG, planarconfig) + if not compress: + compress = False + compresstag = 1 + predictor = False + else: + if isinstance(compress, (tuple, list)): + compress, compresslevel = compress + elif isinstance(compress, 
int): + compress, compresslevel = "ADOBE_DEFLATE", int(compress) + if not 0 <= compresslevel <= 9: + raise ValueError("invalid compression level %s" % compress) + else: + compresslevel = None + compress = compress.upper() + compresstag = enumarg(TIFF.COMPRESSION, compress) + + # prepare ImageJ format + if self._imagej: + if compress in ("LZMA", "ZSTD"): + raise ValueError("ImageJ cannot handle LZMA or ZSTD compression") + if description: + warnings.warn("not writing description to ImageJ file") + description = None + volume = False + if datadtypechar not in "BHhf": + raise ValueError("ImageJ does not support data type %s" % datadtypechar) + ijrgb = photometric == RGB if photometric else None + if datadtypechar not in "B": + ijrgb = False + ijshape = imagej_shape(datashape, ijrgb) + if ijshape[-1] in (3, 4): + photometric = RGB + if datadtypechar not in "B": + raise ValueError( + "ImageJ does not support data type %s " + "for RGB" % datadtypechar + ) + elif photometric is None: + photometric = MINISBLACK + planarconfig = None + if planarconfig == SEPARATE: + raise ValueError("ImageJ does not support planar images") + else: + planarconfig = CONTIG if ijrgb else None + + # define compress function + if compress: + if compresslevel is None: + compressor, compresslevel = TIFF.COMPESSORS[compresstag] + else: + compressor, _ = TIFF.COMPESSORS[compresstag] + compresslevel = int(compresslevel) + if predictor: + if datadtype.kind not in "iu": + raise ValueError("prediction not implemented for %s" % datadtype) + + def compress(data, level=compresslevel): + # horizontal differencing + diff = numpy.diff(data, axis=-2) + data = numpy.insert(diff, 0, data[..., 0, :], axis=-2) + return compressor(data, level) + + else: + + def compress(data, level=compresslevel): + return compressor(data, level) + + # verify colormap and indices + if colormap is not None: + if datadtypechar not in "BH": + raise ValueError("invalid data dtype for palette mode") + colormap = numpy.asarray(colormap, 
dtype=byteorder + "H") + if colormap.shape != (3, 2 ** (datadtype.itemsize * 8)): + raise ValueError("invalid color map shape") + self._colormap = colormap + + # verify tile shape + if tile: + tile = tuple(int(i) for i in tile[:3]) + volume = len(tile) == 3 + if ( + len(tile) < 2 + or tile[-1] % 16 + or tile[-2] % 16 + or any(i < 1 for i in tile) + ): + raise ValueError("invalid tile shape") + else: + tile = () + volume = False + + # normalize data shape to 5D or 6D, depending on volume: + # (pages, planar_samples, [depth,] height, width, contig_samples) + datashape = reshape_nd(datashape, 3 if photometric == RGB else 2) + shape = datashape + ndim = len(datashape) + + samplesperpixel = 1 + extrasamples = 0 + if volume and ndim < 3: + volume = False + if colormap is not None: + photometric = PALETTE + planarconfig = None + if photometric is None: + photometric = MINISBLACK + if bilevel: + photometric = TIFF.PHOTOMETRIC.MINISWHITE + elif planarconfig == CONTIG: + if ndim > 2 and shape[-1] in (3, 4): + photometric = RGB + elif planarconfig == SEPARATE: + if volume and ndim > 3 and shape[-4] in (3, 4): + photometric = RGB + elif ndim > 2 and shape[-3] in (3, 4): + photometric = RGB + elif ndim > 2 and shape[-1] in (3, 4): + photometric = RGB + elif self._imagej: + photometric = MINISBLACK + elif volume and ndim > 3 and shape[-4] in (3, 4): + photometric = RGB + elif ndim > 2 and shape[-3] in (3, 4): + photometric = RGB + if planarconfig and len(shape) <= (3 if volume else 2): + planarconfig = None + photometric = MINISBLACK + if photometric == RGB: + if len(shape) < 3: + raise ValueError("not a RGB(A) image") + if len(shape) < 4: + volume = False + if planarconfig is None: + if shape[-1] in (3, 4): + planarconfig = CONTIG + elif shape[-4 if volume else -3] in (3, 4): + planarconfig = SEPARATE + elif shape[-1] > shape[-4 if volume else -3]: + planarconfig = SEPARATE + else: + planarconfig = CONTIG + if planarconfig == CONTIG: + datashape = (-1, 1) + shape[(-4 if volume 
else -3) :] + samplesperpixel = datashape[-1] + else: + datashape = (-1,) + shape[(-4 if volume else -3) :] + (1,) + samplesperpixel = datashape[1] + if samplesperpixel > 3: + extrasamples = samplesperpixel - 3 + elif photometric == CFA: + if len(shape) != 2: + raise ValueError("invalid CFA image") + volume = False + planarconfig = None + datashape = (-1, 1) + shape[-2:] + (1,) + if 50706 not in (et[0] for et in extratags): + raise ValueError("must specify DNG tags for CFA image") + elif planarconfig and len(shape) > (3 if volume else 2): + if planarconfig == CONTIG: + datashape = (-1, 1) + shape[(-4 if volume else -3) :] + samplesperpixel = datashape[-1] + else: + datashape = (-1,) + shape[(-4 if volume else -3) :] + (1,) + samplesperpixel = datashape[1] + extrasamples = samplesperpixel - 1 + else: + planarconfig = None + # remove trailing 1s + while len(shape) > 2 and shape[-1] == 1: + shape = shape[:-1] + if len(shape) < 3: + volume = False + datashape = (-1, 1) + shape[(-3 if volume else -2) :] + (1,) + + # normalize shape to 6D + assert len(datashape) in (5, 6) + if len(datashape) == 5: + datashape = datashape[:2] + (1,) + datashape[2:] + if datashape[0] == -1: + s0 = product(input_shape) // product(datashape[1:]) + datashape = (s0,) + datashape[1:] + shape = datashape + if data is not None: + data = data.reshape(shape) + + if tile and not volume: + tile = (1, tile[-2], tile[-1]) + + if photometric == PALETTE: + if samplesperpixel != 1 or extrasamples or shape[1] != 1 or shape[-1] != 1: + raise ValueError("invalid data shape for palette mode") + + if photometric == RGB and samplesperpixel == 2: + raise ValueError("not a RGB image (samplesperpixel=2)") + + if bilevel: + if compress: + raise ValueError("cannot save compressed bilevel image") + if tile: + raise ValueError("cannot save tiled bilevel image") + if photometric not in (0, 1): + raise ValueError("cannot save bilevel image as %s" % str(photometric)) + datashape = list(datashape) + if datashape[-2] % 8: 
+ datashape[-2] = datashape[-2] // 8 + 1 + else: + datashape[-2] = datashape[-2] // 8 + datashape = tuple(datashape) + assert datasize == product(datashape) + if data is not None: + data = numpy.packbits(data, axis=-2) + assert datashape[-2] == data.shape[-2] + + bytestr = ( + bytes + if sys.version[0] == "2" + else (lambda x: bytes(x, "ascii") if isinstance(x, str) else x) + ) + tags = [] # list of (code, ifdentry, ifdvalue, writeonce) + + strip_or_tile = "Tile" if tile else "Strip" + tagbytecounts = TIFF.TAG_NAMES[strip_or_tile + "ByteCounts"] + tag_offsets = TIFF.TAG_NAMES[strip_or_tile + "Offsets"] + self._tagoffsets = tag_offsets + + def pack(fmt, *val): + return struct.pack(byteorder + fmt, *val) + + def addtag(code, dtype, count, value, writeonce=False): + # Compute ifdentry & ifdvalue bytes from code, dtype, count, value + # Append (code, ifdentry, ifdvalue, writeonce) to tags list + code = int(TIFF.TAG_NAMES.get(code, code)) + try: + tifftype = TIFF.DATA_DTYPES[dtype] + except KeyError: + raise ValueError("unknown dtype %s" % dtype) + rawcount = count + + if dtype == "s": + # strings + value = bytestr(value) + b"\0" + count = rawcount = len(value) + rawcount = value.find(b"\0\0") + if rawcount < 0: + rawcount = count + else: + rawcount += 1 # length of string without buffer + value = (value,) + elif isinstance(value, bytes): + # packed binary data + dtsize = struct.calcsize(dtype) + if len(value) % dtsize: + raise ValueError("invalid packed binary data") + count = len(value) // dtsize + if len(dtype) > 1: + count *= int(dtype[:-1]) + dtype = dtype[-1] + ifdentry = [pack("HH", code, tifftype), pack(offsetformat, rawcount)] + ifdvalue = None + if struct.calcsize(dtype) * count <= offsetsize: + # value(s) can be written directly + if isinstance(value, bytes): + ifdentry.append(pack(valueformat, value)) + elif count == 1: + if isinstance(value, (tuple, list, numpy.ndarray)): + value = value[0] + ifdentry.append(pack(valueformat, pack(dtype, value))) + else: + 
ifdentry.append(pack(valueformat, pack(str(count) + dtype, *value))) + else: + # use offset to value(s) + ifdentry.append(pack(offsetformat, 0)) + if isinstance(value, bytes): + ifdvalue = value + elif isinstance(value, numpy.ndarray): + assert value.size == count + assert value.dtype.char == dtype + ifdvalue = value.tostring() + elif isinstance(value, (tuple, list)): + ifdvalue = pack(str(count) + dtype, *value) + else: + ifdvalue = pack(dtype, value) + tags.append((code, b"".join(ifdentry), ifdvalue, writeonce)) + + def rational(arg, max_denominator=1000000): + """ "Return nominator and denominator from float or two integers.""" + from fractions import Fraction # delayed import + + try: + f = Fraction.from_float(arg) + except TypeError: + f = Fraction(arg[0], arg[1]) + f = f.limit_denominator(max_denominator) + return f.numerator, f.denominator + + if description: + # user provided description + addtag("ImageDescription", "s", 0, description, writeonce=True) + + # write shape and metadata to ImageDescription + self._metadata = {} if not metadata else metadata.copy() + if self._imagej: + description = imagej_description( + input_shape, + shape[-1] in (3, 4), + self._colormap is not None, + **self._metadata + ) + elif metadata or metadata == {}: + if self._truncate: + self._metadata.update(truncated=True) + description = json_description(input_shape, **self._metadata) + else: + description = None + if description: + # add 64 bytes buffer + # the image description might be updated later with the final shape + description = str2bytes(description, "ascii") + description += b"\0" * 64 + self._descriptionlen = len(description) + addtag("ImageDescription", "s", 0, description, writeonce=True) + + if software: + addtag("Software", "s", 0, software, writeonce=True) + if datetime is None: + datetime = self._now() + addtag( + "DateTime", "s", 0, datetime.strftime("%Y:%m:%d %H:%M:%S"), writeonce=True + ) + addtag("Compression", "H", 1, compresstag) + if predictor: + 
addtag("Predictor", "H", 1, 2) + addtag("ImageWidth", "I", 1, shape[-2]) + addtag("ImageLength", "I", 1, shape[-3]) + if tile: + addtag("TileWidth", "I", 1, tile[-1]) + addtag("TileLength", "I", 1, tile[-2]) + if tile[0] > 1: + addtag("ImageDepth", "I", 1, shape[-4]) + addtag("TileDepth", "I", 1, tile[0]) + addtag("NewSubfileType", "I", 1, 0) + if not bilevel: + sampleformat = {"u": 1, "i": 2, "f": 3, "c": 6}[datadtype.kind] + addtag( + "SampleFormat", "H", samplesperpixel, (sampleformat,) * samplesperpixel + ) + addtag("PhotometricInterpretation", "H", 1, photometric.value) + if colormap is not None: + addtag("ColorMap", "H", colormap.size, colormap) + addtag("SamplesPerPixel", "H", 1, samplesperpixel) + if bilevel: + pass + elif planarconfig and samplesperpixel > 1: + addtag("PlanarConfiguration", "H", 1, planarconfig.value) + addtag( + "BitsPerSample", + "H", + samplesperpixel, + (datadtype.itemsize * 8,) * samplesperpixel, + ) + else: + addtag("BitsPerSample", "H", 1, datadtype.itemsize * 8) + if extrasamples: + if photometric == RGB and extrasamples == 1: + addtag("ExtraSamples", "H", 1, 1) # associated alpha channel + else: + addtag("ExtraSamples", "H", extrasamples, (0,) * extrasamples) + if resolution is not None: + addtag("XResolution", "2I", 1, rational(resolution[0])) + addtag("YResolution", "2I", 1, rational(resolution[1])) + if len(resolution) > 2: + unit = resolution[2] + unit = 1 if unit is None else enumarg(TIFF.RESUNIT, unit) + elif self._imagej: + unit = 1 + else: + unit = 2 + addtag("ResolutionUnit", "H", 1, unit) + elif not self._imagej: + addtag("XResolution", "2I", 1, (1, 1)) + addtag("YResolution", "2I", 1, (1, 1)) + addtag("ResolutionUnit", "H", 1, 1) + if ijmetadata: + for t in imagej_metadata_tags(ijmetadata, byteorder): + addtag(*t) + + contiguous = not compress + if tile: + # one chunk per tile per plane + tiles = ( + (shape[2] + tile[0] - 1) // tile[0], + (shape[3] + tile[1] - 1) // tile[1], + (shape[4] + tile[2] - 1) // tile[2], + ) + 
numtiles = product(tiles) * shape[1] + stripbytecounts = [ + product(tile) * shape[-1] * datadtype.itemsize + ] * numtiles + addtag(tagbytecounts, offsetformat, numtiles, stripbytecounts) + addtag(tag_offsets, offsetformat, numtiles, [0] * numtiles) + contiguous = contiguous and product(tiles) == 1 + if not contiguous: + # allocate tile buffer + chunk = numpy.empty(tile + (shape[-1],), dtype=datadtype) + elif contiguous: + # one strip per plane + if bilevel: + stripbytecounts = [product(datashape[2:])] * shape[1] + else: + stripbytecounts = [product(datashape[2:]) * datadtype.itemsize] * shape[ + 1 + ] + addtag(tagbytecounts, offsetformat, shape[1], stripbytecounts) + addtag(tag_offsets, offsetformat, shape[1], [0] * shape[1]) + addtag("RowsPerStrip", "I", 1, shape[-3]) + else: + # compress rowsperstrip or ~64 KB chunks + rowsize = product(shape[-2:]) * datadtype.itemsize + if rowsperstrip is None: + rowsperstrip = 65536 // rowsize + if rowsperstrip < 1: + rowsperstrip = 1 + elif rowsperstrip > shape[-3]: + rowsperstrip = shape[-3] + addtag("RowsPerStrip", "I", 1, rowsperstrip) + + numstrips = (shape[-3] + rowsperstrip - 1) // rowsperstrip + numstrips *= shape[1] + stripbytecounts = [0] * numstrips + addtag(tagbytecounts, offsetformat, numstrips, [0] * numstrips) + addtag(tag_offsets, offsetformat, numstrips, [0] * numstrips) + + if data is None and not contiguous: + raise ValueError("cannot write non-contiguous empty file") + + # add extra tags from user + for t in extratags: + addtag(*t) + + # TODO: check TIFFReadDirectoryCheckOrder warning in files containing + # multiple tags of same code + # the entries in an IFD must be sorted in ascending order by tag code + tags = sorted(tags, key=lambda x: x[0]) + + if not (self._bigtiff or self._imagej) and (fh.tell() + datasize > 2**31 - 1): + raise ValueError("data too large for standard TIFF file") + + # if not compressed or multi-tiled, write the first IFD and then + # all data contiguously; else, write all IFDs and 
data interleaved + for pageindex in range(1 if contiguous else shape[0]): + # update pointer at ifd_offset + pos = fh.tell() + if pos % 2: + # location of IFD must begin on a word boundary + fh.write(b"\0") + pos += 1 + fh.seek(self._ifdoffset) + fh.write(pack(offsetformat, pos)) + fh.seek(pos) + + # write ifdentries + fh.write(pack(tagnoformat, len(tags))) + tag_offset = fh.tell() + fh.write(b"".join(t[1] for t in tags)) + self._ifdoffset = fh.tell() + fh.write(pack(offsetformat, 0)) # offset to next IFD + + # write tag values and patch offsets in ifdentries, if necessary + for tagindex, tag in enumerate(tags): + if tag[2]: + pos = fh.tell() + if pos % 2: + # tag value is expected to begin on word boundary + fh.write(b"\0") + pos += 1 + fh.seek(tag_offset + tagindex * tagsize + offsetsize + 4) + fh.write(pack(offsetformat, pos)) + fh.seek(pos) + if tag[0] == tag_offsets: + stripoffsetsoffset = pos + elif tag[0] == tagbytecounts: + strip_bytecounts_offset = pos + elif tag[0] == 270 and tag[2].endswith(b"\0\0\0\0"): + # image description buffer + self._descriptionoffset = pos + self._descriptionlenoffset = tag_offset + tagindex * tagsize + 4 + fh.write(tag[2]) + + # write image data + data_offset = fh.tell() + skip = align - data_offset % align + fh.seek(skip, 1) + data_offset += skip + if contiguous: + if data is None: + fh.write_empty(datasize) + else: + fh.write_array(data) + elif tile: + if data is None: + fh.write_empty(numtiles * stripbytecounts[0]) + else: + stripindex = 0 + for plane in data[pageindex]: + for tz in range(tiles[0]): + for ty in range(tiles[1]): + for tx in range(tiles[2]): + c0 = min(tile[0], shape[2] - tz * tile[0]) + c1 = min(tile[1], shape[3] - ty * tile[1]) + c2 = min(tile[2], shape[4] - tx * tile[2]) + chunk[c0:, c1:, c2:] = 0 + chunk[:c0, :c1, :c2] = plane[ + tz * tile[0] : tz * tile[0] + c0, + ty * tile[1] : ty * tile[1] + c1, + tx * tile[2] : tx * tile[2] + c2, + ] + if compress: + t = compress(chunk) + fh.write(t) + 
stripbytecounts[stripindex] = len(t) + stripindex += 1 + else: + fh.write_array(chunk) + fh.flush() + elif compress: + # write one strip per rowsperstrip + assert data.shape[2] == 1 # not handling depth + numstrips = (shape[-3] + rowsperstrip - 1) // rowsperstrip + stripindex = 0 + for plane in data[pageindex]: + for i in range(numstrips): + strip = plane[0, i * rowsperstrip : (i + 1) * rowsperstrip] + strip = compress(strip) + fh.write(strip) + stripbytecounts[stripindex] = len(strip) + stripindex += 1 + + # update strip/tile offsets and bytecounts if necessary + pos = fh.tell() + for tagindex, tag in enumerate(tags): + if tag[0] == tag_offsets: # strip/tile offsets + if tag[2]: + fh.seek(stripoffsetsoffset) + strip_offset = data_offset + for size in stripbytecounts: + fh.write(pack(offsetformat, strip_offset)) + strip_offset += size + else: + fh.seek(tag_offset + tagindex * tagsize + offsetsize + 4) + fh.write(pack(offsetformat, data_offset)) + elif tag[0] == tagbytecounts: # strip/tile bytecounts + if compress: + if tag[2]: + fh.seek(strip_bytecounts_offset) + for size in stripbytecounts: + fh.write(pack(offsetformat, size)) + else: + fh.seek(tag_offset + tagindex * tagsize + offsetsize + 4) + fh.write(pack(offsetformat, stripbytecounts[0])) + break + fh.seek(pos) + fh.flush() + + # remove tags that should be written only once + if pageindex == 0: + tags = [tag for tag in tags if not tag[-1]] + + self._shape = shape + self._datashape = (1,) + input_shape + self._datadtype = datadtype + self._dataoffset = data_offset + self._databytecounts = stripbytecounts + + if contiguous: + # write remaining IFDs/tags later + self._tags = tags + # return offset and size of image data + if returnoffset: + return data_offset, sum(stripbytecounts) + + def _write_remaining_pages(self): + """Write outstanding IFDs and tags to file.""" + if not self._tags or self._truncate: + return + + fh = self._fh + fhpos = fh.tell() + if fhpos % 2: + fh.write(b"\0") + fhpos += 1 + byteorder = 
self._byteorder + offsetformat = self._offsetformat + offsetsize = self._offsetsize + tagnoformat = self._tagnoformat + tagsize = self._tagsize + dataoffset = self._dataoffset + pagedatasize = sum(self._databytecounts) + pageno = self._shape[0] * self._datashape[0] - 1 + + def pack(fmt, *val): + return struct.pack(byteorder + fmt, *val) + + # construct template IFD in memory + # need to patch offsets to next IFD and data before writing to disk + ifd = io.BytesIO() + ifd.write(pack(tagnoformat, len(self._tags))) + tagoffset = ifd.tell() + ifd.write(b"".join(t[1] for t in self._tags)) + ifdoffset = ifd.tell() + ifd.write(pack(offsetformat, 0)) # offset to next IFD + # tag values + for tagindex, tag in enumerate(self._tags): + offset2value = tagoffset + tagindex * tagsize + offsetsize + 4 + if tag[2]: + pos = ifd.tell() + if pos % 2: # tag value is expected to begin on word boundary + ifd.write(b"\0") + pos += 1 + ifd.seek(offset2value) + try: + ifd.write(pack(offsetformat, pos + fhpos)) + except Exception: # struct.error + if self._imagej: + warnings.warn("truncating ImageJ file") + self._truncate = True + return + raise ValueError("data too large for non-BigTIFF file") + ifd.seek(pos) + ifd.write(tag[2]) + if tag[0] == self._tagoffsets: + # save strip/tile offsets for later updates + stripoffset2offset = offset2value + stripoffset2value = pos + elif tag[0] == self._tagoffsets: + # save strip/tile offsets for later updates + stripoffset2offset = None + stripoffset2value = offset2value + # size to word boundary + if ifd.tell() % 2: + ifd.write(b"\0") + + # check if all IFDs fit in file + pos = fh.tell() + if not self._bigtiff and pos + ifd.tell() * pageno > 2**32 - 256: + if self._imagej: + warnings.warn("truncating ImageJ file") + self._truncate = True + return + raise ValueError("data too large for non-BigTIFF file") + + # TODO: assemble IFD chain in memory + for _ in range(pageno): + # update pointer at IFD offset + pos = fh.tell() + fh.seek(self._ifdoffset) + 
fh.write(pack(offsetformat, pos)) + fh.seek(pos) + self._ifdoffset = pos + ifdoffset + # update strip/tile offsets in IFD + dataoffset += pagedatasize # offset to image data + if stripoffset2offset is None: + ifd.seek(stripoffset2value) + ifd.write(pack(offsetformat, dataoffset)) + else: + ifd.seek(stripoffset2offset) + ifd.write(pack(offsetformat, pos + stripoffset2value)) + ifd.seek(stripoffset2value) + stripoffset = dataoffset + for size in self._databytecounts: + ifd.write(pack(offsetformat, stripoffset)) + stripoffset += size + # write IFD entry + fh.write(ifd.getvalue()) + + self._tags = None + self._datadtype = None + self._dataoffset = None + self._databytecounts = None + # do not reset _shape or _data_shape + + def _write_image_description(self): + """Write meta data to ImageDescription tag.""" + if ( + not self._datashape + or self._datashape[0] == 1 + or self._descriptionoffset <= 0 + ): + return + + colormapped = self._colormap is not None + if self._imagej: + isrgb = self._shape[-1] in (3, 4) + description = imagej_description( + self._datashape, isrgb, colormapped, **self._metadata + ) + else: + description = json_description(self._datashape, **self._metadata) + + # rewrite description and its length to file + description = description.encode("utf-8") + description = description[: self._descriptionlen - 1] + pos = self._fh.tell() + self._fh.seek(self._descriptionoffset) + self._fh.write(description) + self._fh.seek(self._descriptionlenoffset) + self._fh.write( + struct.pack(self._byteorder + self._offsetformat, len(description) + 1) + ) + self._fh.seek(pos) + + self._descriptionoffset = 0 + self._descriptionlenoffset = 0 + self._descriptionlen = 0 + + def _now(self): + """Return current date and time.""" + return datetime.datetime.now() + + def close(self): + """Write remaining pages and close file handle.""" + if not self._truncate: + self._write_remaining_pages() + self._write_image_description() + self._fh.close() + + def __enter__(self): + return 
self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + +class TiffFile(object): + """Read image and metadata from TIFF file. + + TiffFile instances must be closed using the 'close' method, which is + automatically called when using the 'with' context manager. + + Attributes + ---------- + pages : TiffPages + Sequence of TIFF pages in file. + series : list of TiffPageSeries + Sequences of closely related TIFF pages. These are computed + from OME, LSM, ImageJ, etc. metadata or based on similarity + of page properties such as shape, dtype, and compression. + byteorder : '>', '<' + The endianness of data in the file. + '>': big-endian (Motorola). + '>': little-endian (Intel). + is_flag : bool + If True, file is of a certain format. + Flags are: bigtiff, movie, shaped, ome, imagej, stk, lsm, fluoview, + nih, vista, 'micromanager, metaseries, mdgel, mediacy, tvips, fei, + sem, scn, svs, scanimage, andor, epics, pilatus, qptiff. + + All attributes are read-only. + + Examples + -------- + >>> # read image array from TIFF file + >>> imsave('temp.tif', numpy.random.rand(5, 301, 219)) + >>> with TiffFile('temp.tif') as tif: + ... data = tif.asarray() + >>> data.shape + (5, 301, 219) + + """ + + def __init__( + self, + arg, + name=None, + offset=None, + size=None, + multifile=True, + movie=None, + **kwargs + ): + """Initialize instance from file. + + Parameters + ---------- + arg : str or open file + Name of file or open file object. + The file objects are closed in TiffFile.close(). + name : str + Optional name of file in case 'arg' is a file handle. + offset : int + Optional start position of embedded file. By default, this is + the current file position. + size : int + Optional size of embedded file. By default, this is the number + of bytes from the 'offset' to the end of the file. + multifile : bool + If True (default), series may include pages from multiple files. + Currently applies to OME-TIFF only. 
+ movie : bool + If True, assume that later pages differ from first page only by + data offsets and byte counts. Significantly increases speed and + reduces memory usage when reading movies with thousands of pages. + Enabling this for non-movie files will result in data corruption + or crashes. Python 3 only. + kwargs : bool + 'is_ome': If False, disable processing of OME-XML metadata. + + """ + if "fastij" in kwargs: + del kwargs["fastij"] + raise DeprecationWarning("the fastij option will be removed") + for key, value in kwargs.items(): + if key[:3] == "is_" and key[3:] in TIFF.FILE_FLAGS: + if value is not None and not value: + setattr(self, key, bool(value)) + else: + raise TypeError("unexpected keyword argument: %s" % key) + + fh = FileHandle(arg, mode="rb", name=name, offset=offset, size=size) + self._fh = fh + self._multifile = bool(multifile) + self._files = {fh.name: self} # cache of TiffFiles + try: + fh.seek(0) + try: + byteorder = {b"II": "<", b"MM": ">"}[fh.read(2)] + except KeyError: + raise ValueError("not a TIFF file") + sys_byteorder = {"big": ">", "little": "<"}[sys.byteorder] + self.isnative = byteorder == sys_byteorder + + version = struct.unpack(byteorder + "H", fh.read(2))[0] + if version == 43: + # BigTiff + self.is_bigtiff = True + offsetsize, zero = struct.unpack(byteorder + "HH", fh.read(4)) + if zero or offsetsize != 8: + raise ValueError("invalid BigTIFF file") + self.byteorder = byteorder + self.offsetsize = 8 + self.offsetformat = byteorder + "Q" + self.tagnosize = 8 + self.tagnoformat = byteorder + "Q" + self.tagsize = 20 + self.tagformat1 = byteorder + "HH" + self.tagformat2 = byteorder + "Q8s" + elif version == 42: + self.is_bigtiff = False + self.byteorder = byteorder + self.offsetsize = 4 + self.offsetformat = byteorder + "I" + self.tagnosize = 2 + self.tagnoformat = byteorder + "H" + self.tagsize = 12 + self.tagformat1 = byteorder + "HH" + self.tagformat2 = byteorder + "I4s" + else: + raise ValueError("invalid TIFF file") + + # 
file handle is at offset to offset to first page + self.pages = TiffPages(self) + + if self.is_lsm and ( + self.filehandle.size >= 2**32 + or self.pages[0].compression != 1 + or self.pages[1].compression != 1 + ): + self._lsm_load_pages() + self._lsm_fix_strip_offsets() + self._lsm_fix_strip_bytecounts() + elif movie: + self.pages.useframes = True + + except Exception: + fh.close() + raise + + @property + def filehandle(self): + """Return file handle.""" + return self._fh + + @property + def filename(self): + """Return name of file handle.""" + return self._fh.name + + @lazyattr + def fstat(self): + """Return status of file handle as stat_result object.""" + try: + return os.fstat(self._fh.fileno()) + except Exception: # io.UnsupportedOperation + return None + + def close(self): + """Close open file handle(s).""" + for tif in self._files.values(): + tif.filehandle.close() + self._files = {} + + def asarray(self, key=None, series=None, out=None, validate=True, maxworkers=1): + """Return image data from multiple TIFF pages as numpy array. + + By default, the data from the first series is returned. + + Parameters + ---------- + key : int, slice, or sequence of page indices + Defines which pages to return as array. + series : int or TiffPageSeries + Defines which series of pages to return as array. + out : numpy.ndarray, str, or file-like object; optional + Buffer where image data will be saved. + If None (default), a new array will be created. + If numpy.ndarray, a writable array of compatible dtype and shape. + If 'memmap', directly memory-map the image data in the TIFF file + if possible; else create a memory-mapped array in a temporary file. + If str or open file, the file name or file object used to + create a memory-map to an array stored in a binary file on disk. + validate : bool + If True (default), validate various tags. + Passed to TiffPage.asarray(). + maxworkers : int + Maximum number of threads to concurrently get data from pages. + Default is 1. 
If None, up to half the CPU cores are used. + Reading data from file is limited to a single thread. + Using multiple threads can significantly speed up this function + if the bottleneck is decoding compressed data, e.g. in case of + large LZW compressed LSM files. + If the bottleneck is I/O or pure Python code, using multiple + threads might be detrimental. + + """ + if not self.pages: + return numpy.array([]) + if key is None and series is None: + series = 0 + if series is not None: + try: + series = self.series[series] + except (KeyError, TypeError): + pass + pages = series._pages + else: + pages = self.pages + + if key is None: + pass + elif isinstance(key, inttypes): + pages = [pages[key]] + elif isinstance(key, slice): + pages = pages[key] + elif isinstance(key, collections.Iterable): + pages = [pages[k] for k in key] + else: + raise TypeError("key must be an int, slice, or sequence") + + if not pages: + raise ValueError("no pages selected") + + if self.is_nih: + result = stack_pages(pages, out=out, maxworkers=maxworkers, squeeze=False) + elif key is None and series and series.offset: + typecode = self.byteorder + series.dtype.char + if out == "memmap" and pages[0].is_memmappable: + result = self.filehandle.memmap_array( + typecode, series.shape, series.offset + ) + else: + if out is not None: + out = create_output(out, series.shape, series.dtype) + self.filehandle.seek(series.offset) + result = self.filehandle.read_array( + typecode, product(series.shape), out=out, native=True + ) + elif len(pages) == 1: + result = pages[0].asarray(out=out, validate=validate) + else: + result = stack_pages(pages, out=out, maxworkers=maxworkers) + + if result is None: + return + + if key is None: + try: + result.shape = series.shape + except ValueError: + try: + warnings.warn( + "failed to reshape %s to %s" % (result.shape, series.shape) + ) + # try series of expected shapes + result.shape = (-1,) + series.shape + except ValueError: + # revert to generic shape + result.shape = 
(-1,) + pages[0].shape + elif len(pages) == 1: + result.shape = pages[0].shape + else: + result.shape = (-1,) + pages[0].shape + return result + + @lazyattr + def series(self): + """Return related pages as TiffPageSeries. + + Side effect: after calling this function, TiffFile.pages might contain + TiffPage and TiffFrame instances. + + """ + if not self.pages: + return [] + + useframes = self.pages.useframes + keyframe = self.pages.keyframe + series = [] + for name in "ome imagej lsm fluoview nih mdgel shaped".split(): + if getattr(self, "is_" + name, False): + series = getattr(self, "_%s_series" % name)() + break + self.pages.useframes = useframes + self.pages.keyframe = keyframe + if not series: + series = self._generic_series() + + # remove empty series, e.g. in MD Gel files + series = [s for s in series if sum(s.shape) > 0] + + for i, s in enumerate(series): + s.index = i + return series + + def _generic_series(self): + """Return image series in file.""" + if self.pages.useframes: + # movie mode + page = self.pages[0] + shape = page.shape + axes = page.axes + if len(self.pages) > 1: + shape = (len(self.pages),) + shape + axes = "I" + axes + return [ + TiffPageSeries(self.pages[:], shape, page.dtype, axes, stype="movie") + ] + + self.pages.clear(False) + self.pages.load() + result = [] + keys = [] + series = {} + compressions = TIFF.DECOMPESSORS + for page in self.pages: + if not page.shape: + continue + key = page.shape + (page.axes, page.compression in compressions) + if key in series: + series[key].append(page) + else: + keys.append(key) + series[key] = [page] + for key in keys: + pages = series[key] + page = pages[0] + shape = page.shape + axes = page.axes + if len(pages) > 1: + shape = (len(pages),) + shape + axes = "I" + axes + result.append( + TiffPageSeries(pages, shape, page.dtype, axes, stype="Generic") + ) + + return result + + def _shaped_series(self): + """Return image series in "shaped" file.""" + pages = self.pages + pages.useframes = True + 
lenpages = len(pages) + + def append_series(series, pages, axes, shape, reshape, name, truncated): + page = pages[0] + if not axes: + shape = page.shape + axes = page.axes + if len(pages) > 1: + shape = (len(pages),) + shape + axes = "Q" + axes + size = product(shape) + resize = product(reshape) + if page.is_contiguous and resize > size and resize % size == 0: + if truncated is None: + truncated = True + axes = "Q" + axes + shape = (resize // size,) + shape + try: + axes = reshape_axes(axes, shape, reshape) + shape = reshape + except ValueError as e: + warnings.warn(str(e)) + series.append( + TiffPageSeries( + pages, + shape, + page.dtype, + axes, + name=name, + stype="Shaped", + truncated=truncated, + ) + ) + + keyframe = axes = shape = reshape = name = None + series = [] + index = 0 + while True: + if index >= lenpages: + break + # new keyframe; start of new series + pages.keyframe = index + keyframe = pages[index] + if not keyframe.is_shaped: + warnings.warn("invalid shape metadata or corrupted file") + return + # read metadata + axes = None + shape = None + metadata = json_description_metadata(keyframe.is_shaped) + name = metadata.get("name", "") + reshape = metadata["shape"] + truncated = metadata.get("truncated", None) + if "axes" in metadata: + axes = metadata["axes"] + if len(axes) == len(reshape): + shape = reshape + else: + axes = "" + warnings.warn("axes do not match shape") + # skip pages if possible + spages = [keyframe] + size = product(reshape) + npages, mod = divmod(size, product(keyframe.shape)) + if mod: + warnings.warn("series shape does not match page shape") + return + if 1 < npages <= lenpages - index: + size *= keyframe._dtype.itemsize + if truncated: + npages = 1 + elif ( + keyframe.is_final + and keyframe.offset + size < pages[index + 1].offset + ): + truncated = False + else: + # need to read all pages for series + truncated = False + for j in range(index + 1, index + npages): + page = pages[j] + page.keyframe = keyframe + 
spages.append(page) + append_series(series, spages, axes, shape, reshape, name, truncated) + index += npages + + return series + + def _imagej_series(self): + """Return image series in ImageJ file.""" + # ImageJ's dimension order is always TZCYXS + # TODO: fix loading of color, composite, or palette images + self.pages.useframes = True + self.pages.keyframe = 0 + + ij = self.imagej_metadata + pages = self.pages + page = pages[0] + + def is_hyperstack(): + # ImageJ hyperstack store all image metadata in the first page and + # image data are stored contiguously before the second page, if any + if not page.is_final: + return False + images = ij.get("images", 0) + if images <= 1: + return False + offset, count = page.is_contiguous + if ( + count != product(page.shape) * page.bitspersample // 8 + or offset + count * images > self.filehandle.size + ): + raise ValueError() + # check that next page is stored after data + if len(pages) > 1 and offset + count * images > pages[1].offset: + return False + return True + + try: + hyperstack = is_hyperstack() + except ValueError: + warnings.warn("invalid ImageJ metadata or corrupted file") + return + if hyperstack: + # no need to read other pages + pages = [page] + else: + self.pages.load() + + shape = [] + axes = [] + if "frames" in ij: + shape.append(ij["frames"]) + axes.append("T") + if "slices" in ij: + shape.append(ij["slices"]) + axes.append("Z") + if "channels" in ij and not ( + page.photometric == 2 and not ij.get("hyperstack", False) + ): + shape.append(ij["channels"]) + axes.append("C") + remain = ij.get("images", len(pages)) // (product(shape) if shape else 1) + if remain > 1: + shape.append(remain) + axes.append("I") + if page.axes[0] == "I": + # contiguous multiple images + shape.extend(page.shape[1:]) + axes.extend(page.axes[1:]) + elif page.axes[:2] == "SI": + # color-mapped contiguous multiple images + shape = page.shape[0:1] + tuple(shape) + page.shape[2:] + axes = list(page.axes[0]) + axes + list(page.axes[2:]) 
+ else: + shape.extend(page.shape) + axes.extend(page.axes) + + truncated = ( + hyperstack + and len(self.pages) == 1 + and page.is_contiguous[1] != product(shape) * page.bitspersample // 8 + ) + + return [ + TiffPageSeries( + pages, shape, page.dtype, axes, stype="ImageJ", truncated=truncated + ) + ] + + def _fluoview_series(self): + """Return image series in FluoView file.""" + self.pages.useframes = True + self.pages.keyframe = 0 + self.pages.load() + mm = self.fluoview_metadata + mmhd = list(reversed(mm["Dimensions"])) + axes = "".join( + TIFF.MM_DIMENSIONS.get(i[0].upper(), "Q") for i in mmhd if i[1] > 1 + ) + shape = tuple(int(i[1]) for i in mmhd if i[1] > 1) + return [ + TiffPageSeries( + self.pages, + shape, + self.pages[0].dtype, + axes, + name=mm["ImageName"], + stype="FluoView", + ) + ] + + def _mdgel_series(self): + """Return image series in MD Gel file.""" + # only a single page, scaled according to metadata in second page + self.pages.useframes = False + self.pages.keyframe = 0 + self.pages.load() + md = self.mdgel_metadata + if md["FileTag"] in (2, 128): + dtype = numpy.dtype("float32") + scale = md["ScalePixel"] + scale = scale[0] / scale[1] # rational + if md["FileTag"] == 2: + # squary root data format + def transform(a): + return a.astype("float32") ** 2 * scale + + else: + + def transform(a): + return a.astype("float32") * scale + + else: + transform = None + page = self.pages[0] + return [ + TiffPageSeries( + [page], page.shape, dtype, page.axes, transform=transform, stype="MDGel" + ) + ] + + def _nih_series(self): + """Return image series in NIH file.""" + self.pages.useframes = True + self.pages.keyframe = 0 + self.pages.load() + page0 = self.pages[0] + if len(self.pages) == 1: + shape = page0.shape + axes = page0.axes + else: + shape = (len(self.pages),) + page0.shape + axes = "I" + page0.axes + return [TiffPageSeries(self.pages, shape, page0.dtype, axes, stype="NIH")] + + def _ome_series(self): + """Return image series in OME-TIFF 
file(s).""" + from xml.etree import cElementTree as etree # delayed import + + omexml = self.pages[0].description + try: + root = etree.fromstring(omexml) + except etree.ParseError as e: + # TODO: test badly encoded OME-XML + warnings.warn("ome-xml: %s" % e) + try: + # might work on Python 2 + omexml = omexml.decode("utf-8", "ignore").encode("utf-8") + root = etree.fromstring(omexml) + except Exception: + return + + self.pages.useframes = True + self.pages.keyframe = 0 + self.pages.load() + + uuid = root.attrib.get("UUID", None) + self._files = {uuid: self} + dirname = self._fh.dirname + modulo = {} + series = [] + for element in root: + if element.tag.endswith("BinaryOnly"): + # TODO: load OME-XML from master or companion file + warnings.warn("ome-xml: not an ome-tiff master file") + break + if element.tag.endswith("StructuredAnnotations"): + for annot in element: + if not annot.attrib.get("Namespace", "").endswith("modulo"): + continue + for value in annot: + for modul in value: + for along in modul: + if not along.tag[:-1].endswith("Along"): + continue + axis = along.tag[-1] + newaxis = along.attrib.get("Type", "other") + newaxis = TIFF.AXES_LABELS[newaxis] + if "Start" in along.attrib: + step = float(along.attrib.get("Step", 1)) + start = float(along.attrib["Start"]) + stop = float(along.attrib["End"]) + step + labels = numpy.arange(start, stop, step) + else: + labels = [ + label.text + for label in along + if label.tag.endswith("Label") + ] + modulo[axis] = (newaxis, labels) + + if not element.tag.endswith("Image"): + continue + + attr = element.attrib + name = attr.get("Name", None) + + for pixels in element: + if not pixels.tag.endswith("Pixels"): + continue + attr = pixels.attrib + dtype = attr.get("PixelType", None) + axes = "".join(reversed(attr["DimensionOrder"])) + shape = list(int(attr["Size" + ax]) for ax in axes) + size = product(shape[:-2]) + ifds = None + spp = 1 # samples per pixel + # FIXME: this implementation assumes the last two + # dimensions 
are stored in tiff pages (shape[:-2]). + # Apparently that is not always the case. + for data in pixels: + if data.tag.endswith("Channel"): + attr = data.attrib + if ifds is None: + spp = int(attr.get("SamplesPerPixel", spp)) + ifds = [None] * (size // spp) + elif int(attr.get("SamplesPerPixel", 1)) != spp: + raise ValueError("cannot handle differing SamplesPerPixel") + continue + if ifds is None: + ifds = [None] * (size // spp) + if not data.tag.endswith("TiffData"): + continue + attr = data.attrib + ifd = int(attr.get("IFD", 0)) + num = int(attr.get("NumPlanes", 1 if "IFD" in attr else 0)) + num = int(attr.get("PlaneCount", num)) + idx = [int(attr.get("First" + ax, 0)) for ax in axes[:-2]] + try: + idx = numpy.ravel_multi_index(idx, shape[:-2]) + except ValueError: + # ImageJ produces invalid ome-xml when cropping + warnings.warn("ome-xml: invalid TiffData index") + continue + for uuid in data: + if not uuid.tag.endswith("UUID"): + continue + if uuid.text not in self._files: + if not self._multifile: + # abort reading multifile OME series + # and fall back to generic series + return [] + fname = uuid.attrib["FileName"] + try: + tif = TiffFile(os.path.join(dirname, fname)) + tif.pages.useframes = True + tif.pages.keyframe = 0 + tif.pages.load() + except (IOError, FileNotFoundError, ValueError): + warnings.warn("ome-xml: failed to read '%s'" % fname) + break + self._files[uuid.text] = tif + tif.close() + pages = self._files[uuid.text].pages + try: + for i in range(num if num else len(pages)): + ifds[idx + i] = pages[ifd + i] + except IndexError: + warnings.warn("ome-xml: index out of range") + # only process first UUID + break + else: + pages = self.pages + try: + for i in range(num if num else len(pages)): + ifds[idx + i] = pages[ifd + i] + except IndexError: + warnings.warn("ome-xml: index out of range") + + if all(i is None for i in ifds): + # skip images without data + continue + + # set a keyframe on all IFDs + keyframe = None + for i in ifds: + # try find a 
TiffPage + if i and i == i.keyframe: + keyframe = i + break + if not keyframe: + # reload a TiffPage from file + for i, keyframe in enumerate(ifds): + if keyframe: + keyframe.parent.pages.keyframe = keyframe.index + keyframe = keyframe.parent.pages[keyframe.index] + ifds[i] = keyframe + break + for i in ifds: + if i is not None: + i.keyframe = keyframe + + dtype = keyframe.dtype + series.append( + TiffPageSeries( + ifds, shape, dtype, axes, parent=self, name=name, stype="OME" + ) + ) + for serie in series: + shape = list(serie.shape) + for axis, (newaxis, labels) in modulo.items(): + i = serie.axes.index(axis) + size = len(labels) + if shape[i] == size: + serie.axes = serie.axes.replace(axis, newaxis, 1) + else: + shape[i] //= size + shape.insert(i + 1, size) + serie.axes = serie.axes.replace(axis, axis + newaxis, 1) + serie.shape = tuple(shape) + # squeeze dimensions + for serie in series: + serie.shape, serie.axes = squeeze_axes(serie.shape, serie.axes) + return series + + def _lsm_series(self): + """Return main image series in LSM file. 
Skip thumbnails.""" + lsmi = self.lsm_metadata + axes = TIFF.CZ_LSMINFO_SCANTYPE[lsmi["ScanType"]] + if self.pages[0].photometric == 2: # RGB; more than one channel + axes = axes.replace("C", "").replace("XY", "XYC") + if lsmi.get("DimensionP", 0) > 1: + axes += "P" + if lsmi.get("DimensionM", 0) > 1: + axes += "M" + axes = axes[::-1] + shape = tuple(int(lsmi[TIFF.CZ_LSMINFO_DIMENSIONS[i]]) for i in axes) + name = lsmi.get("Name", "") + self.pages.keyframe = 0 + pages = self.pages[::2] + dtype = pages[0].dtype + series = [TiffPageSeries(pages, shape, dtype, axes, name=name, stype="LSM")] + + if self.pages[1].is_reduced: + self.pages.keyframe = 1 + pages = self.pages[1::2] + dtype = pages[0].dtype + cp, i = 1, 0 + while cp < len(pages) and i < len(shape) - 2: + cp *= shape[i] + i += 1 + shape = shape[:i] + pages[0].shape + axes = axes[:i] + "CYX" + series.append( + TiffPageSeries(pages, shape, dtype, axes, name=name, stype="LSMreduced") + ) + + return series + + def _lsm_load_pages(self): + """Load all pages from LSM file.""" + self.pages.cache = True + self.pages.useframes = True + # second series: thumbnails + self.pages.keyframe = 1 + keyframe = self.pages[1] + for page in self.pages[1::2]: + page.keyframe = keyframe + # first series: data + self.pages.keyframe = 0 + keyframe = self.pages[0] + for page in self.pages[::2]: + page.keyframe = keyframe + + def _lsm_fix_strip_offsets(self): + """Unwrap strip offsets for LSM files greater than 4 GB. + + Each series and position require separate unwrapping (undocumented). 
+ + """ + if self.filehandle.size < 2**32: + return + + pages = self.pages + npages = len(pages) + series = self.series[0] + axes = series.axes + + # find positions + positions = 1 + for i in 0, 1: + if series.axes[i] in "PM": + positions *= series.shape[i] + + # make time axis first + if positions > 1: + ntimes = 0 + for i in 1, 2: + if axes[i] == "T": + ntimes = series.shape[i] + break + if ntimes: + div, mod = divmod(npages, 2 * positions * ntimes) + assert mod == 0 + shape = (positions, ntimes, div, 2) + indices = numpy.arange(product(shape)).reshape(shape) + indices = numpy.moveaxis(indices, 1, 0) + else: + indices = numpy.arange(npages).reshape(-1, 2) + + # images of reduced page might be stored first + if pages[0].dataoffsets[0] > pages[1].dataoffsets[0]: + indices = indices[..., ::-1] + + # unwrap offsets + wrap = 0 + previousoffset = 0 + for i in indices.flat: + page = pages[i] + dataoffsets = [] + for currentoffset in page.dataoffsets: + if currentoffset < previousoffset: + wrap += 2**32 + dataoffsets.append(currentoffset + wrap) + previousoffset = currentoffset + page.dataoffsets = tuple(dataoffsets) + + def _lsm_fix_strip_bytecounts(self): + """Set databytecounts to size of compressed data. + + The StripByteCounts tag in LSM files contains the number of bytes + for the uncompressed data. 
+ + """ + pages = self.pages + if pages[0].compression == 1: + return + # sort pages by first strip offset + pages = sorted(pages, key=lambda p: p.dataoffsets[0]) + npages = len(pages) - 1 + for i, page in enumerate(pages): + if page.index % 2: + continue + offsets = page.dataoffsets + bytecounts = page.databytecounts + if i < npages: + lastoffset = pages[i + 1].dataoffsets[0] + else: + # LZW compressed strips might be longer than uncompressed + lastoffset = min(offsets[-1] + 2 * bytecounts[-1], self._fh.size) + offsets = offsets + (lastoffset,) + page.databytecounts = tuple( + offsets[j + 1] - offsets[j] for j in range(len(bytecounts)) + ) + + def __getattr__(self, name): + """Return 'is_flag' attributes from first page.""" + if name[3:] in TIFF.FILE_FLAGS: + if not self.pages: + return False + value = bool(getattr(self.pages[0], name)) + setattr(self, name, value) + return value + raise AttributeError( + "'%s' object has no attribute '%s'" % (self.__class__.__name__, name) + ) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def __str__(self, detail=0, width=79): + """Return string containing information about file. + + The detail parameter specifies the level of detail returned: + + 0: file only. + 1: all series, first page of series and its tags. + 2: large tag values and file metadata. + 3: all pages. 
+ + """ + info = [ + "TiffFile '%s'", + format_size(self._fh.size), + {"<": "LittleEndian", ">": "BigEndian"}[self.byteorder], + ] + if self.is_bigtiff: + info.append("BigTiff") + info.append("|".join(f.upper() for f in self.flags)) + if len(self.pages) > 1: + info.append("%i Pages" % len(self.pages)) + if len(self.series) > 1: + info.append("%i Series" % len(self.series)) + if len(self._files) > 1: + info.append("%i Files" % (len(self._files))) + info = " ".join(info) + info = info.replace(" ", " ").replace(" ", " ") + info = info % snipstr(self._fh.name, max(12, width + 2 - len(info))) + if detail <= 0: + return info + info = [info] + info.append("\n".join(str(s) for s in self.series)) + if detail >= 3: + info.extend( + ( + TiffPage.__str__(p, detail=detail, width=width) + for p in self.pages + if p is not None + ) + ) + else: + info.extend( + ( + TiffPage.__str__(s.pages[0], detail=detail, width=width) + for s in self.series + if s.pages[0] is not None + ) + ) + if detail >= 2: + for name in sorted(self.flags): + if hasattr(self, name + "_metadata"): + m = getattr(self, name + "_metadata") + if m: + info.append( + "%s_METADATA\n%s" + % ( + name.upper(), + pformat(m, width=width, height=detail * 12), + ) + ) + return "\n\n".join(info).replace("\n\n\n", "\n\n") + + @lazyattr + def flags(self): + """Return set of file flags.""" + return set( + name.lower() + for name in sorted(TIFF.FILE_FLAGS) + if getattr(self, "is_" + name) + ) + + @lazyattr + def is_mdgel(self): + """File has MD Gel format.""" + try: + return self.pages[0].is_mdgel or self.pages[1].is_mdgel + except IndexError: + return False + + @property + def is_movie(self): + """Return if file is a movie.""" + return self.pages.useframes + + @lazyattr + def shaped_metadata(self): + """Return Tifffile metadata from JSON descriptions as dicts.""" + if not self.is_shaped: + return + return tuple( + json_description_metadata(s.pages[0].is_shaped) + for s in self.series + if s.stype.lower() == "shaped" + ) + + 
@lazyattr + def ome_metadata(self): + """Return OME XML as dict.""" + # TODO: remove this or return XML? + if not self.is_ome: + return + return xml2dict(self.pages[0].description)["OME"] + + @lazyattr + def qptiff_metadata(self): + """Return PerkinElmer-QPI-ImageDescription XML element as dict.""" + if not self.is_qptiff: + return + root = "PerkinElmer-QPI-ImageDescription" + xml = self.pages[0].description.replace(" " + root + " ", root) + return xml2dict(xml)[root] + + @lazyattr + def lsm_metadata(self): + """Return LSM metadata from CZ_LSMINFO tag as dict.""" + if not self.is_lsm: + return + return self.pages[0].tags["CZ_LSMINFO"].value + + @lazyattr + def stk_metadata(self): + """Return STK metadata from UIC tags as dict.""" + if not self.is_stk: + return + page = self.pages[0] + tags = page.tags + result = {} + result["NumberPlanes"] = tags["UIC2tag"].count + if page.description: + result["PlaneDescriptions"] = page.description.split("\0") + # result['plane_descriptions'] = stk_description_metadata( + # page.image_description) + if "UIC1tag" in tags: + result.update(tags["UIC1tag"].value) + if "UIC3tag" in tags: + result.update(tags["UIC3tag"].value) # wavelengths + if "UIC4tag" in tags: + result.update(tags["UIC4tag"].value) # override uic1 tags + uic2tag = tags["UIC2tag"].value + result["ZDistance"] = uic2tag["ZDistance"] + result["TimeCreated"] = uic2tag["TimeCreated"] + result["TimeModified"] = uic2tag["TimeModified"] + try: + result["DatetimeCreated"] = numpy.array( + [ + julian_datetime(*dt) + for dt in zip(uic2tag["DateCreated"], uic2tag["TimeCreated"]) + ], + dtype="datetime64[ns]", + ) + result["DatetimeModified"] = numpy.array( + [ + julian_datetime(*dt) + for dt in zip(uic2tag["DateModified"], uic2tag["TimeModified"]) + ], + dtype="datetime64[ns]", + ) + except ValueError as e: + warnings.warn("stk_metadata: %s" % e) + return result + + @lazyattr + def imagej_metadata(self): + """Return consolidated ImageJ metadata as dict.""" + if not 
self.is_imagej: + return + page = self.pages[0] + result = imagej_description_metadata(page.is_imagej) + if "IJMetadata" in page.tags: + try: + result.update(page.tags["IJMetadata"].value) + except Exception: + pass + return result + + @lazyattr + def fluoview_metadata(self): + """Return consolidated FluoView metadata as dict.""" + if not self.is_fluoview: + return + result = {} + page = self.pages[0] + result.update(page.tags["MM_Header"].value) + # TODO: read stamps from all pages + result["Stamp"] = page.tags["MM_Stamp"].value + # skip parsing image description; not reliable + # try: + # t = fluoview_description_metadata(page.image_description) + # if t is not None: + # result['ImageDescription'] = t + # except Exception as e: + # warnings.warn( + # "failed to read FluoView image description: %s" % e) + return result + + @lazyattr + def nih_metadata(self): + """Return NIH Image metadata from NIHImageHeader tag as dict.""" + if not self.is_nih: + return + return self.pages[0].tags["NIHImageHeader"].value + + @lazyattr + def fei_metadata(self): + """Return FEI metadata from SFEG or HELIOS tags as dict.""" + if not self.is_fei: + return + tags = self.pages[0].tags + if "FEI_SFEG" in tags: + return tags["FEI_SFEG"].value + if "FEI_HELIOS" in tags: + return tags["FEI_HELIOS"].value + + @lazyattr + def sem_metadata(self): + """Return SEM metadata from CZ_SEM tag as dict.""" + if not self.is_sem: + return + return self.pages[0].tags["CZ_SEM"].value + + @lazyattr + def mdgel_metadata(self): + """Return consolidated metadata from MD GEL tags as dict.""" + for page in self.pages[:2]: + if "MDFileTag" in page.tags: + tags = page.tags + break + else: + return + result = {} + for code in range(33445, 33453): + name = TIFF.TAGS[code] + if name not in tags: + continue + result[name[2:]] = tags[name].value + return result + + @lazyattr + def andor_metadata(self): + """Return Andor tags as dict.""" + return self.pages[0].andor_tags + + @lazyattr + def epics_metadata(self): + 
"""Return EPICS areaDetector tags as dict.""" + return self.pages[0].epics_tags + + @lazyattr + def tvips_metadata(self): + """Return TVIPS tag as dict.""" + if not self.is_tvips: + return + return self.pages[0].tags["TVIPS"].value + + @lazyattr + def metaseries_metadata(self): + """Return MetaSeries metadata from image description as dict.""" + if not self.is_metaseries: + return + return metaseries_description_metadata(self.pages[0].description) + + @lazyattr + def pilatus_metadata(self): + """Return Pilatus metadata from image description as dict.""" + if not self.is_pilatus: + return + return pilatus_description_metadata(self.pages[0].description) + + @lazyattr + def micromanager_metadata(self): + """Return consolidated MicroManager metadata as dict.""" + if not self.is_micromanager: + return + # from file header + result = read_micromanager_metadata(self._fh) + # from tag + result.update(self.pages[0].tags["MicroManagerMetadata"].value) + return result + + @lazyattr + def scanimage_metadata(self): + """Return ScanImage non-varying frame and ROI metadata as dict.""" + if not self.is_scanimage: + return + result = {} + try: + framedata, roidata = read_scanimage_metadata(self._fh) + result["FrameData"] = framedata + result.update(roidata) + except ValueError: + pass + # TODO: scanimage_artist_metadata + try: + result["Description"] = scanimage_description_metadata( + self.pages[0].description + ) + except Exception as e: + warnings.warn("scanimage_description_metadata failed: %s" % e) + return result + + @property + def geotiff_metadata(self): + """Return GeoTIFF metadata from first page as dict.""" + if not self.is_geotiff: + return + return self.pages[0].geotiff_tags + + +class TiffPages(object): + """Sequence of TIFF image file directories.""" + + def __init__(self, parent): + """Initialize instance from file. Read first TiffPage from file. + + The file position must be at an offset to an offset to a TiffPage. 
+ + """ + self.parent = parent + self.pages = [] # cache of TiffPages, TiffFrames, or their offsets + self.complete = False # True if offsets to all pages were read + self._tiffpage = TiffPage # class for reading tiff pages + self._keyframe = None + self._cache = True + + # read offset to first page + fh = parent.filehandle + self._nextpageoffset = fh.tell() + offset = struct.unpack(parent.offsetformat, fh.read(parent.offsetsize))[0] + + if offset == 0: + # warnings.warn('file contains no pages') + self.complete = True + return + if offset >= fh.size: + warnings.warn("invalid page offset (%i)" % offset) + self.complete = True + return + + # always read and cache first page + fh.seek(offset) + page = TiffPage(parent, index=0) + self.pages.append(page) + self._keyframe = page + + @property + def cache(self): + """Return if pages/frames are currently being cached.""" + return self._cache + + @cache.setter + def cache(self, value): + """Enable or disable caching of pages/frames. Clear cache if False.""" + value = bool(value) + if self._cache and not value: + self.clear() + self._cache = value + + @property + def useframes(self): + """Return if currently using TiffFrame (True) or TiffPage (False).""" + return self._tiffpage == TiffFrame and TiffFrame is not TiffPage + + @useframes.setter + def useframes(self, value): + """Set to use TiffFrame (True) or TiffPage (False).""" + self._tiffpage = TiffFrame if value else TiffPage + + @property + def keyframe(self): + """Return index of current keyframe.""" + return self._keyframe.index + + @keyframe.setter + def keyframe(self, index): + """Set current keyframe. 
Load TiffPage from file if necessary.""" + if self._keyframe.index == index: + return + if self.complete or 0 <= index < len(self.pages): + page = self.pages[index] + if isinstance(page, TiffPage): + self._keyframe = page + return + elif isinstance(page, TiffFrame): + # remove existing frame + self.pages[index] = page.offset + # load TiffPage from file + useframes = self.useframes + self._tiffpage = TiffPage + self._keyframe = self[index] + self.useframes = useframes + + @property + def next_page_offset(self): + """Return offset where offset to a new page can be stored.""" + if not self.complete: + self._seek(-1) + return self._nextpageoffset + + def load(self): + """Read all remaining pages from file.""" + fh = self.parent.filehandle + keyframe = self._keyframe + pages = self.pages + if not self.complete: + self._seek(-1) + for i, page in enumerate(pages): + if isinstance(page, inttypes): + fh.seek(page) + page = self._tiffpage(self.parent, index=i, keyframe=keyframe) + pages[i] = page + + def clear(self, fully=True): + """Delete all but first page from cache. 
Set keyframe to first page.""" + pages = self.pages + if not self._cache or len(pages) < 1: + return + self._keyframe = pages[0] + if fully: + # delete all but first TiffPage/TiffFrame + for i, page in enumerate(pages[1:]): + if not isinstance(page, inttypes): + pages[i + 1] = page.offset + elif TiffFrame is not TiffPage: + # delete only TiffFrames + for i, page in enumerate(pages): + if isinstance(page, TiffFrame): + pages[i] = page.offset + + def _seek(self, index, maxpages=2**22): + """Seek file to offset of specified page.""" + pages = self.pages + if not pages: + return + + fh = self.parent.filehandle + if fh.closed: + raise RuntimeError("FileHandle is closed") + + if self.complete or 0 <= index < len(pages): + page = pages[index] + offset = page if isinstance(page, inttypes) else page.offset + fh.seek(offset) + return + + offsetformat = self.parent.offsetformat + offsetsize = self.parent.offsetsize + tagnoformat = self.parent.tagnoformat + tagnosize = self.parent.tagnosize + tagsize = self.parent.tagsize + unpack = struct.unpack + + page = pages[-1] + offset = page if isinstance(page, inttypes) else page.offset + + while len(pages) < maxpages: + # read offsets to pages from file until index is reached + fh.seek(offset) + # skip tags + try: + tagno = unpack(tagnoformat, fh.read(tagnosize))[0] + if tagno > 4096: + raise ValueError("suspicious number of tags") + except Exception: + warnings.warn("corrupted tag list at offset %i" % offset) + del pages[-1] + self.complete = True + break + self._nextpageoffset = offset + tagnosize + tagno * tagsize + fh.seek(self._nextpageoffset) + + # read offset to next page + offset = unpack(offsetformat, fh.read(offsetsize))[0] + if offset == 0: + self.complete = True + break + if offset >= fh.size: + warnings.warn("invalid page offset (%i)" % offset) + self.complete = True + break + + pages.append(offset) + if 0 <= index < len(pages): + break + + if index >= len(pages): + raise IndexError("list index out of range") + + page = 
pages[index] + fh.seek(page if isinstance(page, inttypes) else page.offset) + + def __bool__(self): + """Return True if file contains any pages.""" + return len(self.pages) > 0 + + def __len__(self): + """Return number of pages in file.""" + if not self.complete: + self._seek(-1) + return len(self.pages) + + def __getitem__(self, key): + """Return specified page(s) from cache or file.""" + pages = self.pages + if not pages: + raise IndexError("list index out of range") + if key == 0: + return pages[key] + + if isinstance(key, slice): + start, stop, _ = key.indices(2**31 - 1) + if not self.complete and max(stop, start) > len(pages): + self._seek(-1) + return [self[i] for i in range(*key.indices(len(pages)))] + + if self.complete and key >= len(pages): + raise IndexError("list index out of range") + + try: + page = pages[key] + except IndexError: + page = 0 + if not isinstance(page, inttypes): + return page + + self._seek(key) + page = self._tiffpage(self.parent, index=key, keyframe=self._keyframe) + if self._cache: + pages[key] = page + return page + + def __iter__(self): + """Return iterator over all pages.""" + i = 0 + while True: + try: + yield self[i] + i += 1 + except IndexError: + break + + +class TiffPage(object): + """TIFF image file directory (IFD). + + Attributes + ---------- + index : int + Index of page in file. + dtype : numpy.dtype or None + Data type (native byte order) of the image in IFD. + shape : tuple + Dimensions of the image in IFD. + axes : str + Axes label codes: + 'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane, + 'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda, + 'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime, + 'L' exposure, 'V' event, 'Q' unknown, '_' missing + tags : dict + Dictionary of tags in IFD. {tag.name: TiffTag} + colormap : numpy.ndarray + Color look up table, if exists. + + All attributes are read-only. 
+ + Notes + ----- + The internal, normalized '_shape' attribute is 6 dimensional: + + 0 : number planes/images (stk, ij). + 1 : planar samplesperpixel. + 2 : imagedepth Z (sgi). + 3 : imagelength Y. + 4 : imagewidth X. + 5 : contig samplesperpixel. + + """ + + # default properties; will be updated from tags + imagewidth = 0 + imagelength = 0 + imagedepth = 1 + tilewidth = 0 + tilelength = 0 + tiledepth = 1 + bitspersample = 1 + samplesperpixel = 1 + sampleformat = 1 + rowsperstrip = 2**32 - 1 + compression = 1 + planarconfig = 1 + fillorder = 1 + photometric = 0 + predictor = 1 + extrasamples = 1 + colormap = None + software = "" + description = "" + description1 = "" + + def __init__(self, parent, index, keyframe=None): + """Initialize instance from file. + + The file handle position must be at offset to a valid IFD. + + """ + self.parent = parent + self.index = index + self.shape = () + self._shape = () + self.dtype = None + self._dtype = None + self.axes = "" + self.tags = {} + + self.dataoffsets = () + self.databytecounts = () + + # read TIFF IFD structure and its tags from file + fh = parent.filehandle + self.offset = fh.tell() # offset to this IFD + try: + tagno = struct.unpack(parent.tagnoformat, fh.read(parent.tagnosize))[0] + if tagno > 4096: + raise ValueError("suspicious number of tags") + except Exception: + raise ValueError("corrupted tag list at offset %i" % self.offset) + + tagsize = parent.tagsize + data = fh.read(tagsize * tagno) + tags = self.tags + index = -tagsize + for _ in range(tagno): + index += tagsize + try: + tag = TiffTag(self.parent, data[index : index + tagsize]) + except TiffTag.Error as e: + warnings.warn(str(e)) + continue + tagname = tag.name + if tagname not in tags: + name = tagname + tags[name] = tag + else: + # some files contain multiple tags with same code + # e.g. 
MicroManager files contain two ImageDescription tags + i = 1 + while True: + name = "%s%i" % (tagname, i) + if name not in tags: + tags[name] = tag + break + name = TIFF.TAG_ATTRIBUTES.get(name, "") + if name: + if name[:3] in "sof des" and not isinstance(tag.value, str): + pass # wrong string type for software, description + else: + setattr(self, name, tag.value) + + if not tags: + return # found in FIBICS + + # consolidate private tags; remove them from self.tags + if self.is_andor: + self.andor_tags + elif self.is_epics: + self.epics_tags + + if self.is_lsm or (self.index and self.parent.is_lsm): + # correct non standard LSM bitspersample tags + self.tags["BitsPerSample"]._fix_lsm_bitspersample(self) + + if self.is_vista or (self.index and self.parent.is_vista): + # ISS Vista writes wrong ImageDepth tag + self.imagedepth = 1 + + if self.is_stk and "UIC1tag" in tags and not tags["UIC1tag"].value: + # read UIC1tag now that plane count is known + uic1tag = tags["UIC1tag"] + fh.seek(uic1tag.valueoffset) + tags["UIC1tag"].value = read_uic1tag( + fh, + self.parent.byteorder, + uic1tag.dtype, + uic1tag.count, + None, + tags["UIC2tag"].count, + ) + + if "IJMetadata" in tags: + # decode IJMetadata tag + try: + tags["IJMetadata"].value = imagej_metadata( + tags["IJMetadata"].value, + tags["IJMetadataByteCounts"].value, + self.parent.byteorder, + ) + except Exception as e: + warnings.warn(str(e)) + + if "BitsPerSample" in tags: + tag = tags["BitsPerSample"] + if tag.count == 1: + self.bitspersample = tag.value + else: + # LSM might list more items than samplesperpixel + value = tag.value[: self.samplesperpixel] + if any((v - value[0] for v in value)): + self.bitspersample = value + else: + self.bitspersample = value[0] + + if "SampleFormat" in tags: + tag = tags["SampleFormat"] + if tag.count == 1: + self.sampleformat = tag.value + else: + value = tag.value[: self.samplesperpixel] + if any((v - value[0] for v in value)): + self.sampleformat = value + else: + 
self.sampleformat = value[0] + + if "ImageLength" in tags: + if "RowsPerStrip" not in tags or tags["RowsPerStrip"].count > 1: + self.rowsperstrip = self.imagelength + # self.stripsperimage = int(math.floor( + # float(self.imagelength + self.rowsperstrip - 1) / + # self.rowsperstrip)) + + # determine dtype + dtype = self.sampleformat, self.bitspersample + dtype = TIFF.SAMPLE_DTYPES.get(dtype, None) + if dtype is not None: + dtype = numpy.dtype(dtype) + self.dtype = self._dtype = dtype + + # determine shape of data + imagelength = self.imagelength + imagewidth = self.imagewidth + imagedepth = self.imagedepth + samplesperpixel = self.samplesperpixel + + if self.is_stk: + assert self.imagedepth == 1 + uictag = tags["UIC2tag"].value + planes = tags["UIC2tag"].count + if self.planarconfig == 1: + self._shape = (planes, 1, 1, imagelength, imagewidth, samplesperpixel) + if samplesperpixel == 1: + self.shape = (planes, imagelength, imagewidth) + self.axes = "YX" + else: + self.shape = (planes, imagelength, imagewidth, samplesperpixel) + self.axes = "YXS" + else: + self._shape = (planes, samplesperpixel, 1, imagelength, imagewidth, 1) + if samplesperpixel == 1: + self.shape = (planes, imagelength, imagewidth) + self.axes = "YX" + else: + self.shape = (planes, samplesperpixel, imagelength, imagewidth) + self.axes = "SYX" + # detect type of series + if planes == 1: + self.shape = self.shape[1:] + elif numpy.all(uictag["ZDistance"] != 0): + self.axes = "Z" + self.axes + elif numpy.all(numpy.diff(uictag["TimeCreated"]) != 0): + self.axes = "T" + self.axes + else: + self.axes = "I" + self.axes + elif self.photometric == 2 or samplesperpixel > 1: # PHOTOMETRIC.RGB + if self.planarconfig == 1: + self._shape = ( + 1, + 1, + imagedepth, + imagelength, + imagewidth, + samplesperpixel, + ) + if imagedepth == 1: + self.shape = (imagelength, imagewidth, samplesperpixel) + self.axes = "YXS" + else: + self.shape = (imagedepth, imagelength, imagewidth, samplesperpixel) + self.axes = "ZYXS" 
+ else: + self._shape = ( + 1, + samplesperpixel, + imagedepth, + imagelength, + imagewidth, + 1, + ) + if imagedepth == 1: + self.shape = (samplesperpixel, imagelength, imagewidth) + self.axes = "SYX" + else: + self.shape = (samplesperpixel, imagedepth, imagelength, imagewidth) + self.axes = "SZYX" + else: + self._shape = (1, 1, imagedepth, imagelength, imagewidth, 1) + if imagedepth == 1: + self.shape = (imagelength, imagewidth) + self.axes = "YX" + else: + self.shape = (imagedepth, imagelength, imagewidth) + self.axes = "ZYX" + + # dataoffsets and databytecounts + if "TileOffsets" in tags: + self.dataoffsets = tags["TileOffsets"].value + elif "StripOffsets" in tags: + self.dataoffsets = tags["StripOffsets"].value + else: + self.dataoffsets = (0,) + + if "TileByteCounts" in tags: + self.databytecounts = tags["TileByteCounts"].value + elif "StripByteCounts" in tags: + self.databytecounts = tags["StripByteCounts"].value + else: + self.databytecounts = (product(self.shape) * (self.bitspersample // 8),) + if self.compression != 1: + warnings.warn("required ByteCounts tag is missing") + + assert len(self.shape) == len(self.axes) + + def asarray( + self, + out=None, + squeeze=True, + lock=None, + reopen=True, + maxsize=2**44, + validate=True, + ): + """Read image data from file and return as numpy array. + + Raise ValueError if format is unsupported. + + Parameters + ---------- + out : numpy.ndarray, str, or file-like object; optional + Buffer where image data will be saved. + If None (default), a new array will be created. + If numpy.ndarray, a writable array of compatible dtype and shape. + If 'memmap', directly memory-map the image data in the TIFF file + if possible; else create a memory-mapped array in a temporary file. + If str or open file, the file name or file object used to + create a memory-map to an array stored in a binary file on disk. + squeeze : bool + If True, all length-1 dimensions (except X and Y) are + squeezed out from the array. 
+ If False, the shape of the returned array might be different from + the page.shape. + lock : {RLock, NullContext} + A reentrant lock used to synchronize reads from file. + If None (default), the lock of the parent's filehandle is used. + reopen : bool + If True (default) and the parent file handle is closed, the file + is temporarily re-opened and closed if no exception occurs. + maxsize: int or None + Maximum size of data before a ValueError is raised. + Can be used to catch DOS. Default: 16 TB. + validate : bool + If True (default), validate various parameters. + If None, only validate parameters and return None. + + """ + self_ = self + self = self.keyframe # self or keyframe + + if not self._shape or product(self._shape) == 0: + return + + tags = self.tags + + if validate or validate is None: + if maxsize and product(self._shape) > maxsize: + raise ValueError("data are too large %s" % str(self._shape)) + if self.dtype is None: + raise ValueError( + "data type not supported: %s%i" + % (self.sampleformat, self.bitspersample) + ) + if self.compression not in TIFF.DECOMPESSORS: + raise ValueError("cannot decompress %s" % self.compression.name) + if "SampleFormat" in tags: + tag = tags["SampleFormat"] + if tag.count != 1 and any((i - tag.value[0] for i in tag.value)): + raise ValueError("sample formats do not match %s" % tag.value) + if self.is_chroma_subsampled and ( + self.compression != 7 or self.planarconfig == 2 + ): + raise NotImplementedError("chroma subsampling not supported") + if validate is None: + return + + fh = self_.parent.filehandle + lock = fh.lock if lock is None else lock + with lock: + closed = fh.closed + if closed: + if reopen: + fh.open() + else: + raise IOError("file handle is closed") + + dtype = self._dtype + shape = self._shape + imagewidth = self.imagewidth + imagelength = self.imagelength + imagedepth = self.imagedepth + bitspersample = self.bitspersample + typecode = self.parent.byteorder + dtype.char + lsb2msb = self.fillorder == 2 + 
offsets, bytecounts = self_.offsets_bytecounts + istiled = self.is_tiled + + if istiled: + tilewidth = self.tilewidth + tilelength = self.tilelength + tiledepth = self.tiledepth + tw = (imagewidth + tilewidth - 1) // tilewidth + tl = (imagelength + tilelength - 1) // tilelength + td = (imagedepth + tiledepth - 1) // tiledepth + shape = ( + shape[0], + shape[1], + td * tiledepth, + tl * tilelength, + tw * tilewidth, + shape[-1], + ) + tileshape = (tiledepth, tilelength, tilewidth, shape[-1]) + runlen = tilewidth + else: + runlen = imagewidth + + if self.planarconfig == 1: + runlen *= self.samplesperpixel + + if out == "memmap" and self.is_memmappable: + with lock: + result = fh.memmap_array(typecode, shape, offset=offsets[0]) + elif self.is_contiguous: + if out is not None: + out = create_output(out, shape, dtype) + with lock: + fh.seek(offsets[0]) + result = fh.read_array(typecode, product(shape), out=out) + if out is None and not result.dtype.isnative: + # swap byte order and dtype without copy + result.byteswap(True) + result = result.newbyteorder() + if lsb2msb: + reverse_bitorder(result) + else: + result = create_output(out, shape, dtype) + + decompress = TIFF.DECOMPESSORS[self.compression] + + if self.compression == 7: # COMPRESSION.JPEG + if bitspersample not in (8, 12): + raise ValueError("unsupported JPEG precision %i" % bitspersample) + if "JPEGTables" in tags: + table = tags["JPEGTables"].value + else: + table = b"" + unpack = identityfunc + colorspace = TIFF.PHOTOMETRIC(self.photometric).name + + def decompress( + x, + func=decompress, + table=table, + bitspersample=bitspersample, + colorspace=colorspace, + ): + return func(x, table, bitspersample, colorspace).reshape(-1) + + elif bitspersample in (8, 16, 32, 64, 128): + if (bitspersample * runlen) % 8: + raise ValueError("data and sample size mismatch") + + def unpack(x, typecode=typecode): + if self.predictor == 3: # PREDICTOR.FLOATINGPOINT + # the floating point horizontal differencing decoder + # 
needs the raw byte order + typecode = dtype.char + try: + # read only numpy array + return numpy.frombuffer(x, typecode) + except ValueError: + # strips may be missing EOI + # warnings.warn('unpack: %s' % e) + xlen = (len(x) // (bitspersample // 8)) * (bitspersample // 8) + return numpy.frombuffer(x[:xlen], typecode) + + elif isinstance(bitspersample, tuple): + + def unpack(x, typecode=typecode, bitspersample=bitspersample): + return unpack_rgb(x, typecode, bitspersample) + + else: + + def unpack( + x, typecode=typecode, bitspersample=bitspersample, runlen=runlen + ): + return unpack_ints(x, typecode, bitspersample, runlen) + + if istiled: + writable = None + tw, tl, td, pl = 0, 0, 0, 0 + for tile in buffered_read(fh, lock, offsets, bytecounts): + if lsb2msb: + tile = reverse_bitorder(tile) + tile = decompress(tile) + tile = unpack(tile) + try: + tile.shape = tileshape + except ValueError: + # incomplete tiles; see gdal issue #1179 + warnings.warn("invalid tile data") + t = numpy.zeros(tileshape, dtype).reshape(-1) + s = min(tile.size, t.size) + t[:s] = tile[:s] + tile = t.reshape(tileshape) + if self.predictor == 2: # PREDICTOR.HORIZONTAL + if writable is None: + writable = tile.flags["WRITEABLE"] + if writable: + numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile) + else: + tile = numpy.cumsum(tile, axis=-2, dtype=dtype) + elif self.predictor == 3: # PREDICTOR.FLOATINGPOINT + raise NotImplementedError() + result[ + 0, + pl, + td : td + tiledepth, + tl : tl + tilelength, + tw : tw + tilewidth, + :, + ] = tile + del tile + tw += tilewidth + if tw >= shape[4]: + tw, tl = 0, tl + tilelength + if tl >= shape[3]: + tl, td = 0, td + tiledepth + if td >= shape[2]: + td, pl = 0, pl + 1 + result = result[..., :imagedepth, :imagelength, :imagewidth, :] + else: + strip_size = self.rowsperstrip * self.imagewidth + if self.planarconfig == 1: + strip_size *= self.samplesperpixel + result = result.reshape(-1) + index = 0 + for strip in buffered_read(fh, lock, offsets, 
bytecounts): + if lsb2msb: + strip = reverse_bitorder(strip) + strip = decompress(strip) + strip = unpack(strip) + size = min(result.size, strip.size, strip_size, result.size - index) + result[index : index + size] = strip[:size] + del strip + index += size + + result.shape = self._shape + + if self.predictor != 1 and not (istiled and not self.is_contiguous): + if self.parent.is_lsm and self.compression == 1: + pass # work around bug in LSM510 software + elif self.predictor == 2: # PREDICTOR.HORIZONTAL + numpy.cumsum(result, axis=-2, dtype=dtype, out=result) + elif self.predictor == 3: # PREDICTOR.FLOATINGPOINT + result = decode_floats(result) + + if squeeze: + try: + result.shape = self.shape + except ValueError: + warnings.warn( + "failed to reshape from %s to %s" + % (str(result.shape), str(self.shape)) + ) + + if closed: + # TODO: file should remain open if an exception occurred above + fh.close() + return result + + def asrgb( + self, + uint8=False, + alpha=None, + colormap=None, + dmin=None, + dmax=None, + *args, + **kwargs + ): + """Return image data as RGB(A). + + Work in progress. 
+ + """ + data = self.asarray(*args, **kwargs) + self = self.keyframe # self or keyframe + photometric = self.photometric + PHOTOMETRIC = TIFF.PHOTOMETRIC + + if photometric == PHOTOMETRIC.PALETTE: + colormap = self.colormap + if colormap.shape[1] < 2**self.bitspersample or self.dtype.char not in "BH": + raise ValueError("cannot apply colormap") + if uint8: + if colormap.max() > 255: + colormap >>= 8 + colormap = colormap.astype("uint8") + if "S" in self.axes: + data = data[..., 0] if self.planarconfig == 1 else data[0] + data = apply_colormap(data, colormap) + + elif photometric == PHOTOMETRIC.RGB: + if "ExtraSamples" in self.tags: + if alpha is None: + alpha = TIFF.EXTRASAMPLE + extrasamples = self.extrasamples + if self.tags["ExtraSamples"].count == 1: + extrasamples = (extrasamples,) + for i, exs in enumerate(extrasamples): + if exs in alpha: + if self.planarconfig == 1: + data = data[..., [0, 1, 2, 3 + i]] + else: + data = data[:, [0, 1, 2, 3 + i]] + break + else: + if self.planarconfig == 1: + data = data[..., :3] + else: + data = data[:, :3] + # TODO: convert to uint8? + + elif photometric == PHOTOMETRIC.MINISBLACK: + raise NotImplementedError() + elif photometric == PHOTOMETRIC.MINISWHITE: + raise NotImplementedError() + elif photometric == PHOTOMETRIC.SEPARATED: + raise NotImplementedError() + else: + raise NotImplementedError() + return data + + def aspage(self): + return self + + @property + def keyframe(self): + return self + + @keyframe.setter + def keyframe(self, index): + return + + @lazyattr + def offsets_bytecounts(self): + """Return simplified offsets and bytecounts.""" + if self.is_contiguous: + offset, byte_count = self.is_contiguous + return [offset], [byte_count] + return clean_offsets_counts(self.dataoffsets, self.databytecounts) + + @lazyattr + def is_contiguous(self): + """Return offset and size of contiguous data, else None. + + Excludes prediction and fill_order. 
+ + """ + if self.compression != 1 or self.bitspersample not in (8, 16, 32, 64): + return + if "TileWidth" in self.tags: + if ( + self.imagewidth != self.tilewidth + or self.imagelength % self.tilelength + or self.tilewidth % 16 + or self.tilelength % 16 + ): + return + if ( + "ImageDepth" in self.tags + and "TileDepth" in self.tags + and ( + self.imagelength != self.tilelength + or self.imagedepth % self.tiledepth + ) + ): + return + + offsets = self.dataoffsets + bytecounts = self.databytecounts + if len(offsets) == 1: + return offsets[0], bytecounts[0] + if self.is_stk or all( + ( + offsets[i] + bytecounts[i] == offsets[i + 1] or bytecounts[i + 1] == 0 + ) # no data/ignore offset + for i in range(len(offsets) - 1) + ): + return offsets[0], sum(bytecounts) + + @lazyattr + def is_final(self): + """Return if page's image data are stored in final form. + + Excludes byte-swapping. + + """ + return ( + self.is_contiguous + and self.fillorder == 1 + and self.predictor == 1 + and not self.is_chroma_subsampled + ) + + @lazyattr + def is_memmappable(self): + """Return if page's image data in file can be memory-mapped.""" + return ( + self.parent.filehandle.is_file + and self.is_final + and + # (self.bitspersample == 8 or self.parent.isnative) and + self.is_contiguous[0] % self.dtype.itemsize == 0 + ) # aligned? 
+ + def __str__(self, detail=0, width=79): + """Return string containing information about page.""" + if self.keyframe != self: + return TiffFrame.__str__(self, detail) + attr = "" + for name in ("memmappable", "final", "contiguous"): + attr = getattr(self, "is_" + name) + if attr: + attr = name.upper() + break + info = " ".join( + s + for s in ( + "x".join(str(i) for i in self.shape), + "%s%s" + % (TIFF.SAMPLEFORMAT(self.sampleformat).name, self.bitspersample), + "|".join( + i + for i in ( + TIFF.PHOTOMETRIC(self.photometric).name, + "TILED" if self.is_tiled else "", + self.compression.name if self.compression != 1 else "", + self.planarconfig.name if self.planarconfig != 1 else "", + self.predictor.name if self.predictor != 1 else "", + self.fillorder.name if self.fillorder != 1 else "", + ) + if i + ), + attr, + "|".join((f.upper() for f in self.flags)), + ) + if s + ) + info = "TiffPage %i @%i %s" % (self.index, self.offset, info) + if detail <= 0: + return info + info = [info] + tags = self.tags + tlines = [] + vlines = [] + for tag in sorted(tags.values(), key=lambda x: x.code): + value = tag.__str__(width=width + 1) + tlines.append(value[:width].strip()) + if detail > 1 and len(value) > width: + name = tag.name.upper() + if detail <= 2 and ("COUNTS" in name or "OFFSETS" in name): + value = pformat(tag.value, width=width, height=detail * 4) + else: + value = pformat(tag.value, width=width, height=detail * 12) + vlines.append("%s\n%s" % (tag.name, value)) + info.append("\n".join(tlines)) + if detail > 1: + info.append("\n\n".join(vlines)) + if detail > 3: + try: + info.append( + "DATA\n%s" % pformat(self.asarray(), width=width, height=detail * 8) + ) + except Exception: + pass + return "\n\n".join(info) + + @lazyattr + def flags(self): + """Return set of flags.""" + return set( + ( + name.lower() + for name in sorted(TIFF.FILE_FLAGS) + if getattr(self, "is_" + name) + ) + ) + + @property + def ndim(self): + """Return number of array dimensions.""" + return 
len(self.shape) + + @property + def size(self): + """Return number of elements in array.""" + return product(self.shape) + + @lazyattr + def andor_tags(self): + """Return consolidated metadata from Andor tags as dict. + + Remove Andor tags from self.tags. + + """ + if not self.is_andor: + return + tags = self.tags + result = {"Id": tags["AndorId"].value} + for tag in list(self.tags.values()): + code = tag.code + if not 4864 < code < 5031: + continue + value = tag.value + name = tag.name[5:] if len(tag.name) > 5 else tag.name + result[name] = value + del tags[tag.name] + return result + + @lazyattr + def epics_tags(self): + """Return consolidated metadata from EPICS areaDetector tags as dict. + + Remove areaDetector tags from self.tags. + + """ + if not self.is_epics: + return + result = {} + tags = self.tags + for tag in list(self.tags.values()): + code = tag.code + if not 65000 <= code < 65500: + continue + value = tag.value + if code == 65000: + result["timeStamp"] = datetime.datetime.fromtimestamp(float(value)) + elif code == 65001: + result["uniqueID"] = int(value) + elif code == 65002: + result["epicsTSSec"] = int(value) + elif code == 65003: + result["epicsTSNsec"] = int(value) + else: + key, value = value.split(":", 1) + result[key] = astype(value) + del tags[tag.name] + return result + + @lazyattr + def geotiff_tags(self): + """Return consolidated metadata from GeoTIFF tags as dict.""" + if not self.is_geotiff: + return + tags = self.tags + + gkd = tags["GeoKeyDirectoryTag"].value + if gkd[0] != 1: + warnings.warn("invalid GeoKeyDirectoryTag") + return {} + + result = { + "KeyDirectoryVersion": gkd[0], + "KeyRevision": gkd[1], + "KeyRevisionMinor": gkd[2], + # 'NumberOfKeys': gkd[3], + } + # deltags = ['GeoKeyDirectoryTag'] + geokeys = TIFF.GEO_KEYS + geocodes = TIFF.GEO_CODES + for index in range(gkd[3]): + keyid, tagid, count, offset = gkd[4 + index * 4 : index * 4 + 8] + keyid = geokeys.get(keyid, keyid) + if tagid == 0: + value = offset + else: + 
tagname = TIFF.TAGS[tagid] + # deltags.append(tagname) + value = tags[tagname].value[offset : offset + count] + if tagid == 34737 and count > 1 and value[-1] == "|": + value = value[:-1] + value = value if count > 1 else value[0] + if keyid in geocodes: + try: + value = geocodes[keyid](value) + except Exception: + pass + result[keyid] = value + + if "IntergraphMatrixTag" in tags: + value = tags["IntergraphMatrixTag"].value + value = numpy.array(value) + if len(value) == 16: + value = value.reshape((4, 4)).tolist() + result["IntergraphMatrix"] = value + if "ModelPixelScaleTag" in tags: + value = numpy.array(tags["ModelPixelScaleTag"].value).tolist() + result["ModelPixelScale"] = value + if "ModelTiepointTag" in tags: + value = tags["ModelTiepointTag"].value + value = numpy.array(value).reshape((-1, 6)).squeeze().tolist() + result["ModelTiepoint"] = value + if "ModelTransformationTag" in tags: + value = tags["ModelTransformationTag"].value + value = numpy.array(value).reshape((4, 4)).tolist() + result["ModelTransformation"] = value + elif False: + # if 'ModelPixelScaleTag' in tags and 'ModelTiepointTag' in tags: + sx, sy, sz = tags["ModelPixelScaleTag"].value + tiepoints = tags["ModelTiepointTag"].value + transforms = [] + for tp in range(0, len(tiepoints), 6): + i, j, k, x, y, z = tiepoints[tp : tp + 6] + transforms.append( + [ + [sx, 0.0, 0.0, x - i * sx], + [0.0, -sy, 0.0, y + j * sy], + [0.0, 0.0, sz, z - k * sz], + [0.0, 0.0, 0.0, 1.0], + ] + ) + if len(tiepoints) == 6: + transforms = transforms[0] + result["ModelTransformation"] = transforms + + if "RPCCoefficientTag" in tags: + rpcc = tags["RPCCoefficientTag"].value + result["RPCCoefficient"] = { + "ERR_BIAS": rpcc[0], + "ERR_RAND": rpcc[1], + "LINE_OFF": rpcc[2], + "SAMP_OFF": rpcc[3], + "LAT_OFF": rpcc[4], + "LONG_OFF": rpcc[5], + "HEIGHT_OFF": rpcc[6], + "LINE_SCALE": rpcc[7], + "SAMP_SCALE": rpcc[8], + "LAT_SCALE": rpcc[9], + "LONG_SCALE": rpcc[10], + "HEIGHT_SCALE": rpcc[11], + "LINE_NUM_COEFF": 
rpcc[12:33], + "LINE_DEN_COEFF ": rpcc[33:53], + "SAMP_NUM_COEFF": rpcc[53:73], + "SAMP_DEN_COEFF": rpcc[73:], + } + + return result + + @property + def is_tiled(self): + """Page contains tiled image.""" + return "TileWidth" in self.tags + + @property + def is_reduced(self): + """Page is reduced image of another image.""" + return "NewSubfileType" in self.tags and self.tags["NewSubfileType"].value & 1 + + @property + def is_chroma_subsampled(self): + """Page contains chroma subsampled image.""" + return "YCbCrSubSampling" in self.tags and self.tags[ + "YCbCrSubSampling" + ].value != (1, 1) + + @lazyattr + def is_imagej(self): + """Return ImageJ description if exists, else None.""" + for description in (self.description, self.description1): + if not description: + return + if description[:7] == "ImageJ=": + return description + + @lazyattr + def is_shaped(self): + """Return description containing array shape if exists, else None.""" + for description in (self.description, self.description1): + if not description: + return + if description[:1] == "{" and '"shape":' in description: + return description + if description[:6] == "shape=": + return description + + @property + def is_mdgel(self): + """Page contains MDFileTag tag.""" + return "MDFileTag" in self.tags + + @property + def is_mediacy(self): + """Page contains Media Cybernetics Id tag.""" + return "MC_Id" in self.tags and self.tags["MC_Id"].value[:7] == b"MC TIFF" + + @property + def is_stk(self): + """Page contains UIC2Tag tag.""" + return "UIC2tag" in self.tags + + @property + def is_lsm(self): + """Page contains CZ_LSMINFO tag.""" + return "CZ_LSMINFO" in self.tags + + @property + def is_fluoview(self): + """Page contains FluoView MM_STAMP tag.""" + return "MM_Stamp" in self.tags + + @property + def is_nih(self): + """Page contains NIH image header.""" + return "NIHImageHeader" in self.tags + + @property + def is_sgi(self): + """Page contains SGI image and tile depth tags.""" + return "ImageDepth" in 
self.tags and "TileDepth" in self.tags + + @property + def is_vista(self): + """Software tag is 'ISS Vista'.""" + return self.software == "ISS Vista" + + @property + def is_metaseries(self): + """Page contains MDS MetaSeries metadata in ImageDescription tag.""" + if self.index > 1 or self.software != "MetaSeries": + return False + d = self.description + return d.startswith("") and d.endswith("") + + @property + def is_ome(self): + """Page contains OME-XML in ImageDescription tag.""" + if self.index > 1 or not self.description: + return False + d = self.description + return d[:14] == "" + + @property + def is_scn(self): + """Page contains Leica SCN XML in ImageDescription tag.""" + if self.index > 1 or not self.description: + return False + d = self.description + return d[:14] == "" + + @property + def is_micromanager(self): + """Page contains Micro-Manager metadata.""" + return "MicroManagerMetadata" in self.tags + + @property + def is_andor(self): + """Page contains Andor Technology tags.""" + return "AndorId" in self.tags + + @property + def is_pilatus(self): + """Page contains Pilatus tags.""" + return self.software[:8] == "TVX TIFF" and self.description[:2] == "# " + + @property + def is_epics(self): + """Page contains EPICS areaDetector tags.""" + return ( + self.description == "EPICS areaDetector" + or self.software == "EPICS areaDetector" + ) + + @property + def is_tvips(self): + """Page contains TVIPS metadata.""" + return "TVIPS" in self.tags + + @property + def is_fei(self): + """Page contains SFEG or HELIOS metadata.""" + return "FEI_SFEG" in self.tags or "FEI_HELIOS" in self.tags + + @property + def is_sem(self): + """Page contains Zeiss SEM metadata.""" + return "CZ_SEM" in self.tags + + @property + def is_svs(self): + """Page contains Aperio metadata.""" + return self.description[:20] == "Aperio Image Library" + + @property + def is_scanimage(self): + """Page contains ScanImage metadata.""" + return ( + self.description[:12] == "state.config" + or 
self.software[:22] == "SI.LINE_FORMAT_VERSION" + or "scanimage.SI." in self.description[-256:] + ) + + @property + def is_qptiff(self): + """Page contains PerkinElmer tissue images metadata.""" + # The ImageDescription tag contains XML with a top-level + # element + return self.software[:15] == "PerkinElmer-QPI" + + @property + def is_geotiff(self): + """Page contains GeoTIFF metadata.""" + return "GeoKeyDirectoryTag" in self.tags + + +class TiffFrame(object): + """Lightweight TIFF image file directory (IFD). + + Only a limited number of tag values are read from file, e.g. StripOffsets, + and StripByteCounts. Other tag values are assumed to be identical with a + specified TiffPage instance, the keyframe. + + TiffFrame is intended to reduce resource usage and speed up reading data + from file, not for introspection of metadata. + + Not compatible with Python 2. + + """ + + __slots__ = ( + "keyframe", + "parent", + "index", + "offset", + "dataoffsets", + "databytecounts", + ) + + is_mdgel = False + tags = {} + + def __init__(self, parent, index, keyframe): + """Read specified tags from file. + + The file handle position must be at the offset to a valid IFD. 
+ + """ + self.keyframe = keyframe + self.parent = parent + self.index = index + self.dataoffsets = None + self.databytecounts = None + + unpack = struct.unpack + fh = parent.filehandle + self.offset = fh.tell() + try: + tagno = unpack(parent.tagnoformat, fh.read(parent.tagnosize))[0] + if tagno > 4096: + raise ValueError("suspicious number of tags") + except Exception: + raise ValueError("corrupted page list at offset %i" % self.offset) + + # tags = {} + tagcodes = {273, 279, 324, 325} # TIFF.FRAME_TAGS + tagsize = parent.tagsize + codeformat = parent.tagformat1[:2] + + data = fh.read(tagsize * tagno) + index = -tagsize + for _ in range(tagno): + index += tagsize + code = unpack(codeformat, data[index : index + 2])[0] + if code not in tagcodes: + continue + try: + tag = TiffTag(parent, data[index : index + tagsize]) + except TiffTag.Error as e: + warnings.warn(str(e)) + continue + if code == 273 or code == 324: + setattr(self, "dataoffsets", tag.value) + elif code == 279 or code == 325: + setattr(self, "databytecounts", tag.value) + # elif code == 270: + # tagname = tag.name + # if tagname not in tags: + # tags[tagname] = bytes2str(tag.value) + # elif 'ImageDescription1' not in tags: + # tags['ImageDescription1'] = bytes2str(tag.value) + # else: + # tags[tag.name] = tag.value + + def aspage(self): + """Return TiffPage from file.""" + self.parent.filehandle.seek(self.offset) + return TiffPage(self.parent, index=self.index, keyframe=None) + + def asarray(self, *args, **kwargs): + """Read image data from file and return as numpy array.""" + # TODO: fix TypeError on Python 2 + # "TypeError: unbound method asarray() must be called with TiffPage + # instance as first argument (got TiffFrame instance instead)" + kwargs["validate"] = False + return TiffPage.asarray(self, *args, **kwargs) + + def asrgb(self, *args, **kwargs): + """Read image data from file and return RGB image as numpy array.""" + kwargs["validate"] = False + return TiffPage.asrgb(self, *args, **kwargs) + 
+ @property + def offsets_bytecounts(self): + """Return simplified offsets and bytecounts.""" + if self.keyframe.is_contiguous: + return self.dataoffsets[:1], self.keyframe.is_contiguous[1:] + return clean_offsets_counts(self.dataoffsets, self.databytecounts) + + @property + def is_contiguous(self): + """Return offset and size of contiguous data, else None.""" + if self.keyframe.is_contiguous: + return self.dataoffsets[0], self.keyframe.is_contiguous[1] + + @property + def is_memmappable(self): + """Return if page's image data in file can be memory-mapped.""" + return self.keyframe.is_memmappable + + def __getattr__(self, name): + """Return attribute from keyframe.""" + if name in TIFF.FRAME_ATTRS: + return getattr(self.keyframe, name) + # this error could be raised because an AttributeError was + # raised inside a @property function + raise AttributeError( + "'%s' object has no attribute '%s'" % (self.__class__.__name__, name) + ) + + def __str__(self, detail=0): + """Return string containing information about frame.""" + info = " ".join( + s for s in ("x".join(str(i) for i in self.shape), str(self.dtype)) + ) + return "TiffFrame %i @%i %s" % (self.index, self.offset, info) + + +class TiffTag(object): + """TIFF tag structure. + + Attributes + ---------- + name : string + Name of tag. + code : int + Decimal code of tag. + dtype : str + Datatype of tag data. One of TIFF DATA_FORMATS. + count : int + Number of values. + value : various types + Tag data as Python object. + ImageSourceData : int + Location of value in file. + + All attributes are read-only. 
+ + """ + + __slots__ = ("code", "count", "dtype", "value", "valueoffset") + + class Error(Exception): + pass + + def __init__(self, parent, tagheader, **kwargs): + """Initialize instance from tag header.""" + fh = parent.filehandle + byteorder = parent.byteorder + unpack = struct.unpack + offsetsize = parent.offsetsize + + self.valueoffset = fh.tell() + offsetsize + 4 + code, type_ = unpack(parent.tagformat1, tagheader[:4]) + count, value = unpack(parent.tagformat2, tagheader[4:]) + + try: + dtype = TIFF.DATA_FORMATS[type_] + except KeyError: + raise TiffTag.Error("unknown tag data type %i" % type_) + + fmt = "%s%i%s" % (byteorder, count * int(dtype[0]), dtype[1]) + size = struct.calcsize(fmt) + if size > offsetsize or code in TIFF.TAG_READERS: + self.valueoffset = offset = unpack(parent.offsetformat, value)[0] + if offset < 8 or offset > fh.size - size: + raise TiffTag.Error("invalid tag value offset") + # if offset % 2: + # warnings.warn('tag value does not begin on word boundary') + fh.seek(offset) + if code in TIFF.TAG_READERS: + readfunc = TIFF.TAG_READERS[code] + value = readfunc(fh, byteorder, dtype, count, offsetsize) + elif type_ == 7 or (count > 1 and dtype[-1] == "B"): + value = read_bytes(fh, byteorder, dtype, count, offsetsize) + elif code in TIFF.TAGS or dtype[-1] == "s": + value = unpack(fmt, fh.read(size)) + else: + value = read_numpy(fh, byteorder, dtype, count, offsetsize) + elif dtype[-1] == "B" or type_ == 7: + value = value[:size] + else: + value = unpack(fmt, value[:size]) + + process = ( + code not in TIFF.TAG_READERS and code not in TIFF.TAG_TUPLE and type_ != 7 + ) + if process and dtype[-1] == "s" and isinstance(value[0], bytes): + # TIFF ASCII fields can contain multiple strings, + # each terminated with a NUL + value = value[0] + try: + value = bytes2str(stripascii(value).strip()) + except UnicodeDecodeError: + warnings.warn("tag %i: coercing invalid ASCII to bytes" % code) + dtype = "1B" + else: + if code in TIFF.TAG_ENUM: + t = 
TIFF.TAG_ENUM[code] + try: + value = tuple(t(v) for v in value) + except ValueError as e: + warnings.warn(str(e)) + if process: + if len(value) == 1: + value = value[0] + + self.code = code + self.dtype = dtype + self.count = count + self.value = value + + @property + def name(self): + return TIFF.TAGS.get(self.code, str(self.code)) + + def _fix_lsm_bitspersample(self, parent): + """Correct LSM bitspersample tag. + + Old LSM writers may use a separate region for two 16-bit values, + although they fit into the tag value element of the tag. + + """ + if self.code == 258 and self.count == 2: + # TODO: test this case; need example file + warnings.warn("correcting LSM bitspersample tag") + tof = parent.offsetformat[parent.offsetsize] + self.valueoffset = struct.unpack(tof, self._value)[0] + parent.filehandle.seek(self.valueoffset) + self.value = struct.unpack(">> # read image stack from sequence of TIFF files + >>> imsave('temp_C001T001.tif', numpy.random.rand(64, 64)) + >>> imsave('temp_C001T002.tif', numpy.random.rand(64, 64)) + >>> tifs = TiffSequence('temp_C001*.tif') + >>> tifs.shape + (1, 2) + >>> tifs.axes + 'CT' + >>> data = tifs.asarray() + >>> data.shape + (1, 2, 64, 64) + + """ + + _patterns = { + "axes": r""" + # matches Olympus OIF and Leica TIFF series + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4})) + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + """ + } + + class ParseError(Exception): + pass + + def __init__(self, files, imread=TiffFile, pattern="axes", *args, **kwargs): + """Initialize instance from multiple files. + + Parameters + ---------- + files : str, pathlib.Path, or sequence thereof + Glob pattern or sequence of file names. + Binary streams are not supported. 
        imread : function or class
            Image read function or class with asarray function returning numpy
            array from single file.
        pattern : str
            Regular expression pattern that matches axes names and sequence
            indices in file names.
            By default, the pattern matches Olympus OIF and Leica TIFF series.

        """
        if isinstance(files, pathlib.Path):
            files = str(files)
        if isinstance(files, basestring):
            # treat a string as a glob pattern
            files = natural_sorted(glob.glob(files))
        files = list(files)
        if not files:
            raise ValueError("no files found")
        if isinstance(files[0], pathlib.Path):
            files = [str(pathlib.Path(f)) for f in files]
        elif not isinstance(files[0], basestring):
            raise ValueError("not a file name")
        self.files = files

        if hasattr(imread, "asarray"):
            # redefine imread: wrap a reader class into a plain function
            _imread = imread

            def imread(fname, *args, **kwargs):
                with _imread(fname) as im:
                    return im.asarray(*args, **kwargs)

        self.imread = imread

        self.pattern = self._patterns.get(pattern, pattern)
        try:
            self._parse()
            if not self.axes:
                self.axes = "I"
        except self.ParseError:
            # fall back to a flat, index-only sequence
            self.axes = "I"
            self.shape = (len(files),)
            self._startindex = (0,)
            self._indices = tuple((i,) for i in range(len(files)))

    def __str__(self):
        """Return string with information about image sequence."""
        return "\n".join(
            [
                self.files[0],
                " size: %i" % len(self.files),
                " axes: %s" % self.axes,
                " shape: %s" % str(self.shape),
            ]
        )

    def __len__(self):
        return len(self.files)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        # no resources are held between reads
        pass

    def asarray(self, out=None, *args, **kwargs):
        """Read image data from all files and return as numpy array.

        The args and kwargs parameters are passed to the imread function.

        Raise IndexError or ValueError if image shapes do not match.

        """
        # read the first file to determine per-image shape and dtype
        im = self.imread(self.files[0], *args, **kwargs)
        shape = self.shape + im.shape
        result = create_output(out, shape, dtype=im.dtype)
        result = result.reshape(-1, *im.shape)
        for index, fname in zip(self._indices, self.files):
            # map the parsed axes indices to a flat position
            index = [i - j for i, j in zip(index, self._startindex)]
            index = numpy.ravel_multi_index(index, self.shape)
            im = self.imread(fname, *args, **kwargs)
            result[index] = im
        result.shape = shape
        return result

    def _parse(self):
        """Get axes and shape from file names."""
        if not self.pattern:
            raise self.ParseError("invalid pattern")
        pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
        matches = pattern.findall(self.files[0])
        if not matches:
            raise self.ParseError("pattern does not match file names")
        matches = matches[-1]
        if len(matches) % 2:
            raise self.ParseError("pattern does not match axis name and index")
        axes = "".join(m for m in matches[::2] if m)
        if not axes:
            raise self.ParseError("pattern does not match file names")

        indices = []
        for fname in self.files:
            matches = pattern.findall(fname)[-1]
            if axes != "".join(m for m in matches[::2] if m):
                raise ValueError("axes do not match within the image sequence")
            indices.append([int(m) for m in matches[1::2] if m])
        shape = tuple(numpy.max(indices, axis=0))
        startindex = tuple(numpy.min(indices, axis=0))
        shape = tuple(i - j + 1 for i, j in zip(shape, startindex))
        if product(shape) != len(self.files):
            warnings.warn("files are missing. Missing data are zeroed")

        self.axes = axes.upper()
        self.shape = shape
        self._indices = indices
        self._startindex = startindex


class FileHandle(object):
    """Binary file handle.

    A limited, special purpose file handler that can:

    * handle embedded files (for CZI within CZI files)
    * re-open closed files (for multi-file formats, such as OME-TIFF)
    * read and write numpy arrays and records from file like objects

    Only 'rb' and 'wb' modes are supported.
    Concurrently reading and writing of the same stream is untested.

    When initialized from another file handle, do not use it unless this
    FileHandle is closed.

    Attributes
    ----------
    name : str
        Name of the file.
    path : str
        Absolute path to file.
    size : int
        Size of file in bytes.
    is_file : bool
        If True, file has a filno and can be memory-mapped.

    All attributes are read-only.

    """

    __slots__ = (
        "_fh",
        "_file",
        "_mode",
        "_name",
        "_dir",
        "_lock",
        "_offset",
        "_size",
        "_close",
        "is_file",
    )

    def __init__(self, file, mode="rb", name=None, offset=None, size=None):
        """Initialize file handle from file name or another file handle.

        Parameters
        ----------
        file : str, pathlib.Path, binary stream, or FileHandle
            File name or seekable binary stream, such as an open file
            or BytesIO.
        mode : str
            File open mode in case 'file' is a file name. Must be 'rb' or 'wb'.
        name : str
            Optional name of file in case 'file' is a binary stream.
        offset : int
            Optional start position of embedded file. By default, this is
            the current file position.
        size : int
            Optional size of embedded file. By default, this is the number
            of bytes from the 'offset' to the end of the file.

        """
        self._file = file
        self._fh = None
        self._mode = mode
        self._name = name
        self._dir = ""
        self._offset = offset
        self._size = size
        self._close = True
        self.is_file = False
        self._lock = NullContext()
        self.open()

    def open(self):
        """Open or re-open file."""
        if self._fh:
            return  # file is open

        if isinstance(self._file, pathlib.Path):
            self._file = str(self._file)
        if isinstance(self._file, basestring):
            # file name
            self._file = os.path.realpath(self._file)
            self._dir, self._name = os.path.split(self._file)
            self._fh = open(self._file, self._mode)
            self._close = True
            if self._offset is None:
                self._offset = 0
        elif isinstance(self._file, FileHandle):
            # FileHandle: share the underlying stream; offsets accumulate
            self._fh = self._file._fh
            if self._offset is None:
                self._offset = 0
            self._offset += self._file._offset
            self._close = False
            if not self._name:
                if self._offset:
                    name, ext = os.path.splitext(self._file._name)
                    self._name = "%s@%i%s" % (name, self._offset, ext)
                else:
                    self._name = self._file._name
            if self._mode and self._mode != self._file._mode:
                raise ValueError("FileHandle has wrong mode")
            self._mode = self._file._mode
            self._dir = self._file._dir
        elif hasattr(self._file, "seek"):
            # binary stream: open file, BytesIO
            try:
                self._file.tell()
            except Exception:
                raise ValueError("binary stream is not seekable")
            self._fh = self._file
            if self._offset is None:
                self._offset = self._file.tell()
            self._close = False
            if not self._name:
                try:
                    self._dir, self._name = os.path.split(self._fh.name)
                except AttributeError:
                    self._name = "Unnamed binary stream"
            try:
                self._mode = self._fh.mode
            except AttributeError:
                pass
        else:
            raise ValueError(
                "The first parameter must be a file name, "
                "seekable binary stream, or FileHandle"
            )

        if self._offset:
            self._fh.seek(self._offset)

        if self._size is None:
            # determine size from current position to end of stream
            pos = self._fh.tell()
            self._fh.seek(self._offset, 2)
            self._size = self._fh.tell()
            self._fh.seek(pos)

        try:
            self._fh.fileno()
            self.is_file = True
        except Exception:
            self.is_file = False

    def read(self, size=-1):
        """Read 'size' bytes from file, or until EOF is reached."""
        if size < 0 and self._offset:
            # never read past the end of an embedded file
            size = self._size
        return self._fh.read(size)

    def write(self, bytestring):
        """Write bytestring to file."""
        return self._fh.write(bytestring)

    def flush(self):
        """Flush write buffers if applicable."""
        return self._fh.flush()

    def memmap_array(self, dtype, shape, offset=0, mode="r", order="C"):
        """Return numpy.memmap of data stored in file."""
        if not self.is_file:
            raise ValueError("Cannot memory-map file without fileno")
        return numpy.memmap(
            self._fh,
            dtype=dtype,
            mode=mode,
            offset=self._offset + offset,
            shape=shape,
            order=order,
        )

    def read_array(
        self, dtype, count=-1, sep="", chunksize=2**25, out=None, native=False
    ):
        """Return numpy array from file.

        Work around numpy issue #2230, "numpy.fromfile does not accept
        StringIO object" https://github.com/numpy/numpy/issues/2230.

        """
        fh = self._fh
        dtype = numpy.dtype(dtype)
        size = self._size if count < 0 else count * dtype.itemsize

        if out is None:
            try:
                result = numpy.fromfile(fh, dtype, count, sep)
            except IOError:
                # ByteIO: fromfile rejects non-file streams; read then wrap
                data = fh.read(size)
                result = numpy.frombuffer(data, dtype, count).copy()
            if native and not result.dtype.isnative:
                # swap byte order and dtype without copy
                result.byteswap(True)
                result = result.newbyteorder()
            return result

        # Read data from file in chunks and copy to output array
        shape = out.shape
        size = min(out.nbytes, size)
        out = out.reshape(-1)
        index = 0
        while size > 0:
            data = fh.read(min(chunksize, size))
            datasize = len(data)
            if datasize == 0:
                break
            size -= datasize
            data = numpy.frombuffer(data, dtype)
            out[index : index + data.size] = data
            index += data.size

        if hasattr(out, "flush"):
            # flush memory-mapped outputs to disk
            out.flush()
        return out.reshape(shape)

    def read_record(self, dtype, shape=1, byteorder=None):
        """Return numpy record from file."""
        rec = numpy.rec
        try:
            record = rec.fromfile(self._fh, dtype, shape, byteorder=byteorder)
        except Exception:
            # stream without fileno: read bytes and parse in memory
            dtype = numpy.dtype(dtype)
            if shape is None:
                shape = self._size // dtype.itemsize
            size = product(sequence(shape)) * dtype.itemsize
            data = self._fh.read(size)
            record = rec.fromstring(data, dtype, shape, byteorder=byteorder)
        return record[0] if shape == 1 else record

    def write_empty(self, size):
        """Append size bytes to file.
Position must be at end of file.""" + if size < 1: + return + self._fh.seek(size - 1, 1) + self._fh.write(b"\x00") + + def write_array(self, data): + """Write numpy array to binary file.""" + try: + data.tofile(self._fh) + except Exception: + # BytesIO + self._fh.write(data.tostring()) + + def tell(self): + """Return file's current position.""" + return self._fh.tell() - self._offset + + def seek(self, offset, whence=0): + """Set file's current position.""" + if self._offset: + if whence == 0: + self._fh.seek(self._offset + offset, whence) + return + elif whence == 2 and self._size > 0: + self._fh.seek(self._offset + self._size + offset, 0) + return + self._fh.seek(offset, whence) + + def close(self): + """Close file.""" + if self._close and self._fh: + self._fh.close() + self._fh = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def __getattr__(self, name): + """Return attribute from underlying file object.""" + if self._offset: + warnings.warn("FileHandle: '%s' not implemented for embedded files" % name) + return getattr(self._fh, name) + + @property + def name(self): + return self._name + + @property + def dirname(self): + return self._dir + + @property + def path(self): + return os.path.join(self._dir, self._name) + + @property + def size(self): + return self._size + + @property + def closed(self): + return self._fh is None + + @property + def lock(self): + return self._lock + + @lock.setter + def lock(self, value): + self._lock = threading.RLock() if value else NullContext() + + +class NullContext(object): + """Null context manager. + + >>> with NullContext(): + ... 
pass + + """ + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + pass + + +class OpenFileCache(object): + """Keep files open.""" + + __slots__ = ("files", "past", "lock", "size") + + def __init__(self, size, lock=None): + """Initialize open file cache.""" + self.past = [] # FIFO of opened files + self.files = {} # refcounts of opened files + self.lock = NullContext() if lock is None else lock + self.size = int(size) + + def open(self, filehandle): + """Re-open file if necessary.""" + with self.lock: + if filehandle in self.files: + self.files[filehandle] += 1 + elif filehandle.closed: + filehandle.open() + self.files[filehandle] = 1 + self.past.append(filehandle) + + def close(self, filehandle): + """Close opened file if no longer used.""" + with self.lock: + if filehandle in self.files: + self.files[filehandle] -= 1 + # trim the file cache + index = 0 + size = len(self.past) + while size > self.size and index < size: + filehandle = self.past[index] + if self.files[filehandle] == 0: + filehandle.close() + del self.files[filehandle] + del self.past[index] + size -= 1 + else: + index += 1 + + def clear(self): + """Close all opened files if not in use.""" + with self.lock: + for filehandle, refcount in list(self.files.items()): + if refcount == 0: + filehandle.close() + del self.files[filehandle] + del self.past[self.past.index(filehandle)] + + +class LazyConst(object): + """Class whose attributes are computed on first access from its methods.""" + + def __init__(self, cls): + self._cls = cls + self.__doc__ = getattr(cls, "__doc__") + + def __getattr__(self, name): + func = getattr(self._cls, name) + if not callable(func): + return func + try: + value = func() + except TypeError: + # Python 2 unbound method + value = func.__func__() + setattr(self, name, value) + return value + + +@LazyConst +class TIFF(object): + """Namespace for module constants.""" + + def TAGS(): + # TIFF tag codes and names from TIFF6, TIFF/EP, EXIF, 
and other specs + return { + 11: "ProcessingSoftware", + 254: "NewSubfileType", + 255: "SubfileType", + 256: "ImageWidth", + 257: "ImageLength", + 258: "BitsPerSample", + 259: "Compression", + 262: "PhotometricInterpretation", + 263: "Thresholding", + 264: "CellWidth", + 265: "CellLength", + 266: "FillOrder", + 269: "DocumentName", + 270: "ImageDescription", + 271: "Make", + 272: "Model", + 273: "StripOffsets", + 274: "Orientation", + 277: "SamplesPerPixel", + 278: "RowsPerStrip", + 279: "StripByteCounts", + 280: "MinSampleValue", + 281: "MaxSampleValue", + 282: "XResolution", + 283: "YResolution", + 284: "PlanarConfiguration", + 285: "PageName", + 286: "XPosition", + 287: "YPosition", + 288: "FreeOffsets", + 289: "FreeByteCounts", + 290: "GrayResponseUnit", + 291: "GrayResponseCurve", + 292: "T4Options", + 293: "T6Options", + 296: "ResolutionUnit", + 297: "PageNumber", + 300: "ColorResponseUnit", + 301: "TransferFunction", + 305: "Software", + 306: "DateTime", + 315: "Artist", + 316: "HostComputer", + 317: "Predictor", + 318: "WhitePoint", + 319: "PrimaryChromaticities", + 320: "ColorMap", + 321: "HalftoneHints", + 322: "TileWidth", + 323: "TileLength", + 324: "TileOffsets", + 325: "TileByteCounts", + 326: "BadFaxLines", + 327: "CleanFaxData", + 328: "ConsecutiveBadFaxLines", + 330: "SubIFDs", + 332: "InkSet", + 333: "InkNames", + 334: "NumberOfInks", + 336: "DotRange", + 337: "TargetPrinter", + 338: "ExtraSamples", + 339: "SampleFormat", + 340: "SMinSampleValue", + 341: "SMaxSampleValue", + 342: "TransferRange", + 343: "ClipPath", + 344: "XClipPathUnits", + 345: "YClipPathUnits", + 346: "Indexed", + 347: "JPEGTables", + 351: "OPIProxy", + 400: "GlobalParametersIFD", + 401: "ProfileType", + 402: "FaxProfile", + 403: "CodingMethods", + 404: "VersionYear", + 405: "ModeNumber", + 433: "Decode", + 434: "DefaultImageColor", + 435: "T82Options", + 437: "JPEGTables_", # 347 + 512: "JPEGProc", + 513: "JPEGInterchangeFormat", + 514: "JPEGInterchangeFormatLength", + 515: 
"JPEGRestartInterval", + 517: "JPEGLosslessPredictors", + 518: "JPEGPointTransforms", + 519: "JPEGQTables", + 520: "JPEGDCTables", + 521: "JPEGACTables", + 529: "YCbCrCoefficients", + 530: "YCbCrSubSampling", + 531: "YCbCrPositioning", + 532: "ReferenceBlackWhite", + 559: "StripRowCounts", + 700: "XMP", # XMLPacket + 769: "GDIGamma", # GDI+ + 770: "ICCProfileDescriptor", # GDI+ + 771: "SRGBRenderingIntent", # GDI+ + 800: "ImageTitle", # GDI+ + 999: "USPTO_Miscellaneous", + 4864: "AndorId", # TODO: Andor Technology 4864 - 5030 + 4869: "AndorTemperature", + 4876: "AndorExposureTime", + 4878: "AndorKineticCycleTime", + 4879: "AndorAccumulations", + 4881: "AndorAcquisitionCycleTime", + 4882: "AndorReadoutTime", + 4884: "AndorPhotonCounting", + 4885: "AndorEmDacLevel", + 4890: "AndorFrames", + 4896: "AndorHorizontalFlip", + 4897: "AndorVerticalFlip", + 4898: "AndorClockwise", + 4899: "AndorCounterClockwise", + 4904: "AndorVerticalClockVoltage", + 4905: "AndorVerticalShiftSpeed", + 4907: "AndorPreAmpSetting", + 4908: "AndorCameraSerial", + 4911: "AndorActualTemperature", + 4912: "AndorBaselineClamp", + 4913: "AndorPrescans", + 4914: "AndorModel", + 4915: "AndorChipSizeX", + 4916: "AndorChipSizeY", + 4944: "AndorBaselineOffset", + 4966: "AndorSoftwareVersion", + 18246: "Rating", + 18247: "XP_DIP_XML", + 18248: "StitchInfo", + 18249: "RatingPercent", + 20481: "ResolutionXUnit", # GDI+ + 20482: "ResolutionYUnit", # GDI+ + 20483: "ResolutionXLengthUnit", # GDI+ + 20484: "ResolutionYLengthUnit", # GDI+ + 20485: "PrintFlags", # GDI+ + 20486: "PrintFlagsVersion", # GDI+ + 20487: "PrintFlagsCrop", # GDI+ + 20488: "PrintFlagsBleedWidth", # GDI+ + 20489: "PrintFlagsBleedWidthScale", # GDI+ + 20490: "HalftoneLPI", # GDI+ + 20491: "HalftoneLPIUnit", # GDI+ + 20492: "HalftoneDegree", # GDI+ + 20493: "HalftoneShape", # GDI+ + 20494: "HalftoneMisc", # GDI+ + 20495: "HalftoneScreen", # GDI+ + 20496: "JPEGQuality", # GDI+ + 20497: "GridSize", # GDI+ + 20498: "ThumbnailFormat", # GDI+ + 
20499: "ThumbnailWidth", # GDI+ + 20500: "ThumbnailHeight", # GDI+ + 20501: "ThumbnailColorDepth", # GDI+ + 20502: "ThumbnailPlanes", # GDI+ + 20503: "ThumbnailRawBytes", # GDI+ + 20504: "ThumbnailSize", # GDI+ + 20505: "ThumbnailCompressedSize", # GDI+ + 20506: "ColorTransferFunction", # GDI+ + 20507: "ThumbnailData", + 20512: "ThumbnailImageWidth", # GDI+ + 20513: "ThumbnailImageHeight", # GDI+ + 20514: "ThumbnailBitsPerSample", # GDI+ + 20515: "ThumbnailCompression", + 20516: "ThumbnailPhotometricInterp", # GDI+ + 20517: "ThumbnailImageDescription", # GDI+ + 20518: "ThumbnailEquipMake", # GDI+ + 20519: "ThumbnailEquipModel", # GDI+ + 20520: "ThumbnailStripOffsets", # GDI+ + 20521: "ThumbnailOrientation", # GDI+ + 20522: "ThumbnailSamplesPerPixel", # GDI+ + 20523: "ThumbnailRowsPerStrip", # GDI+ + 20524: "ThumbnailStripBytesCount", # GDI+ + 20525: "ThumbnailResolutionX", + 20526: "ThumbnailResolutionY", + 20527: "ThumbnailPlanarConfig", # GDI+ + 20528: "ThumbnailResolutionUnit", + 20529: "ThumbnailTransferFunction", + 20530: "ThumbnailSoftwareUsed", # GDI+ + 20531: "ThumbnailDateTime", # GDI+ + 20532: "ThumbnailArtist", # GDI+ + 20533: "ThumbnailWhitePoint", # GDI+ + 20534: "ThumbnailPrimaryChromaticities", # GDI+ + 20535: "ThumbnailYCbCrCoefficients", # GDI+ + 20536: "ThumbnailYCbCrSubsampling", # GDI+ + 20537: "ThumbnailYCbCrPositioning", + 20538: "ThumbnailRefBlackWhite", # GDI+ + 20539: "ThumbnailCopyRight", # GDI+ + 20545: "InteroperabilityIndex", + 20546: "InteroperabilityVersion", + 20624: "LuminanceTable", + 20625: "ChrominanceTable", + 20736: "FrameDelay", # GDI+ + 20737: "LoopCount", # GDI+ + 20738: "GlobalPalette", # GDI+ + 20739: "IndexBackground", # GDI+ + 20740: "IndexTransparent", # GDI+ + 20752: "PixelUnit", # GDI+ + 20753: "PixelPerUnitX", # GDI+ + 20754: "PixelPerUnitY", # GDI+ + 20755: "PaletteHistogram", # GDI+ + 28672: "SonyRawFileType", # Sony ARW + 28722: "VignettingCorrParams", # Sony ARW + 28725: "ChromaticAberrationCorrParams", # Sony 
ARW + 28727: "DistortionCorrParams", # Sony ARW + # Private tags >= 32768 + 32781: "ImageID", + 32931: "WangTag1", + 32932: "WangAnnotation", + 32933: "WangTag3", + 32934: "WangTag4", + 32953: "ImageReferencePoints", + 32954: "RegionXformTackPoint", + 32955: "WarpQuadrilateral", + 32956: "AffineTransformMat", + 32995: "Matteing", + 32996: "DataType", + 32997: "ImageDepth", + 32998: "TileDepth", + 33300: "ImageFullWidth", + 33301: "ImageFullLength", + 33302: "TextureFormat", + 33303: "TextureWrapModes", + 33304: "FieldOfViewCotangent", + 33305: "MatrixWorldToScreen", + 33306: "MatrixWorldToCamera", + 33405: "Model2", + 33421: "CFARepeatPatternDim", + 33422: "CFAPattern", + 33423: "BatteryLevel", + 33424: "KodakIFD", + 33434: "ExposureTime", + 33437: "FNumber", + 33432: "Copyright", + 33445: "MDFileTag", + 33446: "MDScalePixel", + 33447: "MDColorTable", + 33448: "MDLabName", + 33449: "MDSampleInfo", + 33450: "MDPrepDate", + 33451: "MDPrepTime", + 33452: "MDFileUnits", + 33550: "ModelPixelScaleTag", + 33589: "AdventScale", + 33590: "AdventRevision", + 33628: "UIC1tag", # Metamorph Universal Imaging Corp STK + 33629: "UIC2tag", + 33630: "UIC3tag", + 33631: "UIC4tag", + 33723: "IPTCNAA", + 33858: "ExtendedTagsOffset", # DEFF points IFD with private tags + 33918: "IntergraphPacketData", # INGRPacketDataTag + 33919: "IntergraphFlagRegisters", # INGRFlagRegisters + 33920: "IntergraphMatrixTag", # IrasBTransformationMatrix + 33921: "INGRReserved", + 33922: "ModelTiepointTag", + 33923: "LeicaMagic", + 34016: "Site", + 34017: "ColorSequence", + 34018: "IT8Header", + 34019: "RasterPadding", + 34020: "BitsPerRunLength", + 34021: "BitsPerExtendedRunLength", + 34022: "ColorTable", + 34023: "ImageColorIndicator", + 34024: "BackgroundColorIndicator", + 34025: "ImageColorValue", + 34026: "BackgroundColorValue", + 34027: "PixelIntensityRange", + 34028: "TransparencyIndicator", + 34029: "ColorCharacterization", + 34030: "HCUsage", + 34031: "TrapIndicator", + 34032: "CMYKEquivalent", + 
34118: "CZ_SEM", # Zeiss SEM + 34152: "AFCP_IPTC", + 34232: "PixelMagicJBIGOptions", + 34263: "JPLCartoIFD", + 34122: "IPLAB", # number of images + 34264: "ModelTransformationTag", + 34306: "WB_GRGBLevels", # Leaf MOS + 34310: "LeafData", + 34361: "MM_Header", + 34362: "MM_Stamp", + 34363: "MM_Unknown", + 34377: "ImageResources", # Photoshop + 34386: "MM_UserBlock", + 34412: "CZ_LSMINFO", + 34665: "ExifTag", + 34675: "InterColorProfile", # ICCProfile + 34680: "FEI_SFEG", # + 34682: "FEI_HELIOS", # + 34683: "FEI_TITAN", # + 34687: "FXExtensions", + 34688: "MultiProfiles", + 34689: "SharedData", + 34690: "T88Options", + 34710: "MarCCD", # offset to MarCCD header + 34732: "ImageLayer", + 34735: "GeoKeyDirectoryTag", + 34736: "GeoDoubleParamsTag", + 34737: "GeoAsciiParamsTag", + 34750: "JBIGOptions", + 34821: "PIXTIFF", # ? Pixel Translations Inc + 34850: "ExposureProgram", + 34852: "SpectralSensitivity", + 34853: "GPSTag", # GPSIFD + 34855: "ISOSpeedRatings", + 34856: "OECF", + 34857: "Interlace", + 34858: "TimeZoneOffset", + 34859: "SelfTimerMode", + 34864: "SensitivityType", + 34865: "StandardOutputSensitivity", + 34866: "RecommendedExposureIndex", + 34867: "ISOSpeed", + 34868: "ISOSpeedLatitudeyyy", + 34869: "ISOSpeedLatitudezzz", + 34908: "HylaFAXFaxRecvParams", + 34909: "HylaFAXFaxSubAddress", + 34910: "HylaFAXFaxRecvTime", + 34911: "FaxDcs", + 34929: "FedexEDR", + 34954: "LeafSubIFD", + 34959: "Aphelion1", + 34960: "Aphelion2", + 34961: "AphelionInternal", # ADCIS + 36864: "ExifVersion", + 36867: "DateTimeOriginal", + 36868: "DateTimeDigitized", + 36873: "GooglePlusUploadCode", + 36880: "OffsetTime", + 36881: "OffsetTimeOriginal", + 36882: "OffsetTimeDigitized", + # TODO: Pilatus/CHESS/TV6 36864..37120 conflicting with Exif tags + # 36864: 'TVX ?', + # 36865: 'TVX_NumExposure', + # 36866: 'TVX_NumBackground', + # 36867: 'TVX_ExposureTime', + # 36868: 'TVX_BackgroundTime', + # 36870: 'TVX ?', + # 36873: 'TVX_SubBpp', + # 36874: 'TVX_SubWide', + # 36875: 
'TVX_SubHigh', + # 36876: 'TVX_BlackLevel', + # 36877: 'TVX_DarkCurrent', + # 36878: 'TVX_ReadNoise', + # 36879: 'TVX_DarkCurrentNoise', + # 36880: 'TVX_BeamMonitor', + # 37120: 'TVX_UserVariables', # A/D values + 37121: "ComponentsConfiguration", + 37122: "CompressedBitsPerPixel", + 37377: "ShutterSpeedValue", + 37378: "ApertureValue", + 37379: "BrightnessValue", + 37380: "ExposureBiasValue", + 37381: "MaxApertureValue", + 37382: "SubjectDistance", + 37383: "MeteringMode", + 37384: "LightSource", + 37385: "Flash", + 37386: "FocalLength", + 37387: "FlashEnergy_", # 37387 + 37388: "SpatialFrequencyResponse_", # 37388 + 37389: "Noise", + 37390: "FocalPlaneXResolution", + 37391: "FocalPlaneYResolution", + 37392: "FocalPlaneResolutionUnit", + 37393: "ImageNumber", + 37394: "SecurityClassification", + 37395: "ImageHistory", + 37396: "SubjectLocation", + 37397: "ExposureIndex", + 37398: "TIFFEPStandardID", + 37399: "SensingMethod", + 37434: "CIP3DataFile", + 37435: "CIP3Sheet", + 37436: "CIP3Side", + 37439: "StoNits", + 37500: "MakerNote", + 37510: "UserComment", + 37520: "SubsecTime", + 37521: "SubsecTimeOriginal", + 37522: "SubsecTimeDigitized", + 37679: "MODIText", # Microsoft Office Document Imaging + 37680: "MODIOLEPropertySetStorage", + 37681: "MODIPositioning", + 37706: "TVIPS", # offset to TemData structure + 37707: "TVIPS1", + 37708: "TVIPS2", # same TemData structure as undefined + 37724: "ImageSourceData", # Photoshop + 37888: "Temperature", + 37889: "Humidity", + 37890: "Pressure", + 37891: "WaterDepth", + 37892: "Acceleration", + 37893: "CameraElevationAngle", + 40001: "MC_IpWinScal", # Media Cybernetics + 40100: "MC_IdOld", + 40965: "InteroperabilityTag", # InteropOffset + 40091: "XPTitle", + 40092: "XPComment", + 40093: "XPAuthor", + 40094: "XPKeywords", + 40095: "XPSubject", + 40960: "FlashpixVersion", + 40961: "ColorSpace", + 40962: "PixelXDimension", + 40963: "PixelYDimension", + 40964: "RelatedSoundFile", + 40976: "SamsungRawPointersOffset", + 40977: 
"SamsungRawPointersLength", + 41217: "SamsungRawByteOrder", + 41218: "SamsungRawUnknown", + 41483: "FlashEnergy", + 41484: "SpatialFrequencyResponse", + 41485: "Noise_", # 37389 + 41486: "FocalPlaneXResolution_", # 37390 + 41487: "FocalPlaneYResolution_", # 37391 + 41488: "FocalPlaneResolutionUnit_", # 37392 + 41489: "ImageNumber_", # 37393 + 41490: "SecurityClassification_", # 37394 + 41491: "ImageHistory_", # 37395 + 41492: "SubjectLocation_", # 37395 + 41493: "ExposureIndex_ ", # 37397 + 41494: "TIFF-EPStandardID", + 41495: "SensingMethod_", # 37399 + 41728: "FileSource", + 41729: "SceneType", + 41730: "CFAPattern_", # 33422 + 41985: "CustomRendered", + 41986: "ExposureMode", + 41987: "WhiteBalance", + 41988: "DigitalZoomRatio", + 41989: "FocalLengthIn35mmFilm", + 41990: "SceneCaptureType", + 41991: "GainControl", + 41992: "Contrast", + 41993: "Saturation", + 41994: "Sharpness", + 41995: "DeviceSettingDescription", + 41996: "SubjectDistanceRange", + 42016: "ImageUniqueID", + 42032: "CameraOwnerName", + 42033: "BodySerialNumber", + 42034: "LensSpecification", + 42035: "LensMake", + 42036: "LensModel", + 42037: "LensSerialNumber", + 42112: "GDAL_METADATA", + 42113: "GDAL_NODATA", + 42240: "Gamma", + 43314: "NIHImageHeader", + 44992: "ExpandSoftware", + 44993: "ExpandLens", + 44994: "ExpandFilm", + 44995: "ExpandFilterLens", + 44996: "ExpandScanner", + 44997: "ExpandFlashLamp", + 48129: "PixelFormat", # HDP and WDP + 48130: "Transformation", + 48131: "Uncompressed", + 48132: "ImageType", + 48256: "ImageWidth_", # 256 + 48257: "ImageHeight_", + 48258: "WidthResolution", + 48259: "HeightResolution", + 48320: "ImageOffset", + 48321: "ImageByteCount", + 48322: "AlphaOffset", + 48323: "AlphaByteCount", + 48324: "ImageDataDiscard", + 48325: "AlphaDataDiscard", + 50215: "OceScanjobDescription", + 50216: "OceApplicationSelector", + 50217: "OceIdentificationNumber", + 50218: "OceImageLogicCharacteristics", + 50255: "Annotations", + 50288: "MC_Id", # Media Cybernetics + 
50289: "MC_XYPosition", + 50290: "MC_ZPosition", + 50291: "MC_XYCalibration", + 50292: "MC_LensCharacteristics", + 50293: "MC_ChannelName", + 50294: "MC_ExcitationWavelength", + 50295: "MC_TimeStamp", + 50296: "MC_FrameProperties", + 50341: "PrintImageMatching", + 50495: "PCO_RAW", # TODO: PCO CamWare + 50547: "OriginalFileName", + 50560: "USPTO_OriginalContentType", # US Patent Office + 50561: "USPTO_RotationCode", + 50656: "CR2CFAPattern", + 50706: "DNGVersion", # DNG 50706 .. 51112 + 50707: "DNGBackwardVersion", + 50708: "UniqueCameraModel", + 50709: "LocalizedCameraModel", + 50710: "CFAPlaneColor", + 50711: "CFALayout", + 50712: "LinearizationTable", + 50713: "BlackLevelRepeatDim", + 50714: "BlackLevel", + 50715: "BlackLevelDeltaH", + 50716: "BlackLevelDeltaV", + 50717: "WhiteLevel", + 50718: "DefaultScale", + 50719: "DefaultCropOrigin", + 50720: "DefaultCropSize", + 50721: "ColorMatrix1", + 50722: "ColorMatrix2", + 50723: "CameraCalibration1", + 50724: "CameraCalibration2", + 50725: "ReductionMatrix1", + 50726: "ReductionMatrix2", + 50727: "AnalogBalance", + 50728: "AsShotNeutral", + 50729: "AsShotWhiteXY", + 50730: "BaselineExposure", + 50731: "BaselineNoise", + 50732: "BaselineSharpness", + 50733: "BayerGreenSplit", + 50734: "LinearResponseLimit", + 50735: "CameraSerialNumber", + 50736: "LensInfo", + 50737: "ChromaBlurRadius", + 50738: "AntiAliasStrength", + 50739: "ShadowScale", + 50740: "DNGPrivateData", + 50741: "MakerNoteSafety", + 50752: "RawImageSegmentation", + 50778: "CalibrationIlluminant1", + 50779: "CalibrationIlluminant2", + 50780: "BestQualityScale", + 50781: "RawDataUniqueID", + 50784: "AliasLayerMetadata", + 50827: "OriginalRawFileName", + 50828: "OriginalRawFileData", + 50829: "ActiveArea", + 50830: "MaskedAreas", + 50831: "AsShotICCProfile", + 50832: "AsShotPreProfileMatrix", + 50833: "CurrentICCProfile", + 50834: "CurrentPreProfileMatrix", + 50838: "IJMetadataByteCounts", + 50839: "IJMetadata", + 50844: "RPCCoefficientTag", + 50879: 
"ColorimetricReference", + 50885: "SRawType", + 50898: "PanasonicTitle", + 50899: "PanasonicTitle2", + 50931: "CameraCalibrationSignature", + 50932: "ProfileCalibrationSignature", + 50933: "ProfileIFD", + 50934: "AsShotProfileName", + 50935: "NoiseReductionApplied", + 50936: "ProfileName", + 50937: "ProfileHueSatMapDims", + 50938: "ProfileHueSatMapData1", + 50939: "ProfileHueSatMapData2", + 50940: "ProfileToneCurve", + 50941: "ProfileEmbedPolicy", + 50942: "ProfileCopyright", + 50964: "ForwardMatrix1", + 50965: "ForwardMatrix2", + 50966: "PreviewApplicationName", + 50967: "PreviewApplicationVersion", + 50968: "PreviewSettingsName", + 50969: "PreviewSettingsDigest", + 50970: "PreviewColorSpace", + 50971: "PreviewDateTime", + 50972: "RawImageDigest", + 50973: "OriginalRawFileDigest", + 50974: "SubTileBlockSize", + 50975: "RowInterleaveFactor", + 50981: "ProfileLookTableDims", + 50982: "ProfileLookTableData", + 51008: "OpcodeList1", + 51009: "OpcodeList2", + 51022: "OpcodeList3", + 51023: "FibicsXML", # + 51041: "NoiseProfile", + 51043: "TimeCodes", + 51044: "FrameRate", + 51058: "TStop", + 51081: "ReelName", + 51089: "OriginalDefaultFinalSize", + 51090: "OriginalBestQualitySize", + 51091: "OriginalDefaultCropSize", + 51105: "CameraLabel", + 51107: "ProfileHueSatMapEncoding", + 51108: "ProfileLookTableEncoding", + 51109: "BaselineExposureOffset", + 51110: "DefaultBlackRender", + 51111: "NewRawImageDigest", + 51112: "RawToPreviewGain", + 51125: "DefaultUserCrop", + 51123: "MicroManagerMetadata", + 59932: "Padding", + 59933: "OffsetSchema", + # Reusable Tags 65000-65535 + # 65000: Dimap_Document XML + # 65000-65112: Photoshop Camera RAW EXIF tags + # 65000: 'OwnerName', + # 65001: 'SerialNumber', + # 65002: 'Lens', + # 65024: 'KDC_IFD', + # 65100: 'RawFile', + # 65101: 'Converter', + # 65102: 'WhiteBalance', + # 65105: 'Exposure', + # 65106: 'Shadows', + # 65107: 'Brightness', + # 65108: 'Contrast', + # 65109: 'Saturation', + # 65110: 'Sharpness', + # 65111: 
'Smoothness', + # 65112: 'MoireFilter', + 65200: "FlexXML", # + 65563: "PerSample", + } + + def TAG_NAMES(): + return {v: c for c, v in TIFF.TAGS.items()} + + def TAG_READERS(): + # Map TIFF tag codes to import functions + return { + 320: read_colormap, + # 700: read_bytes, # read_utf8, + # 34377: read_bytes, + 33723: read_bytes, + # 34675: read_bytes, + 33628: read_uic1tag, # Universal Imaging Corp STK + 33629: read_uic2tag, + 33630: read_uic3tag, + 33631: read_uic4tag, + 34118: read_cz_sem, # Carl Zeiss SEM + 34361: read_mm_header, # Olympus FluoView + 34362: read_mm_stamp, + 34363: read_numpy, # MM_Unknown + 34386: read_numpy, # MM_UserBlock + 34412: read_cz_lsminfo, # Carl Zeiss LSM + 34680: read_fei_metadata, # S-FEG + 34682: read_fei_metadata, # Helios NanoLab + 37706: read_tvips_header, # TVIPS EMMENU + 37724: read_bytes, # ImageSourceData + 33923: read_bytes, # read_leica_magic + 43314: read_nih_image_header, + # 40001: read_bytes, + 40100: read_bytes, + 50288: read_bytes, + 50296: read_bytes, + 50839: read_bytes, + 51123: read_json, + 34665: read_exif_ifd, + 34853: read_gps_ifd, + 40965: read_interoperability_ifd, + } + + def TAG_TUPLE(): + # Tags whose values must be stored as tuples + return frozenset((273, 279, 324, 325, 530, 531, 34736)) + + def TAG_ATTRIBUTES(): + # Map tag codes to TiffPage attribute names + return { + "ImageWidth": "imagewidth", + "ImageLength": "imagelength", + "BitsPerSample": "bitspersample", + "Compression": "compression", + "PlanarConfiguration": "planarconfig", + "FillOrder": "fillorder", + "PhotometricInterpretation": "photometric", + "ColorMap": "colormap", + "ImageDescription": "description", + "ImageDescription1": "description1", + "SamplesPerPixel": "samplesperpixel", + "RowsPerStrip": "rowsperstrip", + "Software": "software", + "Predictor": "predictor", + "TileWidth": "tilewidth", + "TileLength": "tilelength", + "ExtraSamples": "extrasamples", + "SampleFormat": "sampleformat", + "ImageDepth": "imagedepth", + "TileDepth": 
"tiledepth", + } + + def TAG_ENUM(): + return { + # 254: TIFF.FILETYPE, + 255: TIFF.OFILETYPE, + 259: TIFF.COMPRESSION, + 262: TIFF.PHOTOMETRIC, + 263: TIFF.THRESHHOLD, + 266: TIFF.FILLORDER, + 274: TIFF.ORIENTATION, + 284: TIFF.PLANARCONFIG, + 290: TIFF.GRAYRESPONSEUNIT, + # 292: TIFF.GROUP3OPT, + # 293: TIFF.GROUP4OPT, + 296: TIFF.RESUNIT, + 300: TIFF.COLORRESPONSEUNIT, + 317: TIFF.PREDICTOR, + 338: TIFF.EXTRASAMPLE, + 339: TIFF.SAMPLEFORMAT, + # 512: TIFF.JPEGPROC, + # 531: TIFF.YCBCRPOSITION, + } + + def FILETYPE(): + class FILETYPE(enum.IntFlag): + # Python 3.6 only + UNDEFINED = 0 + REDUCEDIMAGE = 1 + PAGE = 2 + MASK = 4 + + return FILETYPE + + def OFILETYPE(): + class OFILETYPE(enum.IntEnum): + UNDEFINED = 0 + IMAGE = 1 + REDUCEDIMAGE = 2 + PAGE = 3 + + return OFILETYPE + + def COMPRESSION(): + class COMPRESSION(enum.IntEnum): + NONE = 1 # Uncompressed + CCITTRLE = 2 # CCITT 1D + CCITT_T4 = 3 # 'T4/Group 3 Fax', + CCITT_T6 = 4 # 'T6/Group 4 Fax', + LZW = 5 + OJPEG = 6 # old-style JPEG + JPEG = 7 + ADOBE_DEFLATE = 8 + JBIG_BW = 9 + JBIG_COLOR = 10 + JPEG_99 = 99 + KODAK_262 = 262 + NEXT = 32766 + SONY_ARW = 32767 + PACKED_RAW = 32769 + SAMSUNG_SRW = 32770 + CCIRLEW = 32771 + SAMSUNG_SRW2 = 32772 + PACKBITS = 32773 + THUNDERSCAN = 32809 + IT8CTPAD = 32895 + IT8LW = 32896 + IT8MP = 32897 + IT8BL = 32898 + PIXARFILM = 32908 + PIXARLOG = 32909 + DEFLATE = 32946 + DCS = 32947 + APERIO_JP2000_YCBC = 33003 # Leica Aperio + APERIO_JP2000_RGB = 33005 # Leica Aperio + JBIG = 34661 + SGILOG = 34676 + SGILOG24 = 34677 + JPEG2000 = 34712 + NIKON_NEF = 34713 + JBIG2 = 34715 + MDI_BINARY = 34718 # 'Microsoft Document Imaging + MDI_PROGRESSIVE = 34719 # 'Microsoft Document Imaging + MDI_VECTOR = 34720 # 'Microsoft Document Imaging + JPEG_LOSSY = 34892 + LZMA = 34925 + ZSTD = 34926 + OPS_PNG = 34933 # Objective Pathology Services + OPS_JPEGXR = 34934 # Objective Pathology Services + PIXTIFF = 50013 + KODAK_DCR = 65000 + PENTAX_PEF = 65535 + # def __bool__(self): return self 
!= 1 # Python 3.6 only + + return COMPRESSION + + def PHOTOMETRIC(): + class PHOTOMETRIC(enum.IntEnum): + MINISWHITE = 0 + MINISBLACK = 1 + RGB = 2 + PALETTE = 3 + MASK = 4 + SEPARATED = 5 # CMYK + YCBCR = 6 + CIELAB = 8 + ICCLAB = 9 + ITULAB = 10 + CFA = 32803 # Color Filter Array + LOGL = 32844 + LOGLUV = 32845 + LINEAR_RAW = 34892 + + return PHOTOMETRIC + + def THRESHHOLD(): + class THRESHHOLD(enum.IntEnum): + BILEVEL = 1 + HALFTONE = 2 + ERRORDIFFUSE = 3 + + return THRESHHOLD + + def FILLORDER(): + class FILLORDER(enum.IntEnum): + MSB2LSB = 1 + LSB2MSB = 2 + + return FILLORDER + + def ORIENTATION(): + class ORIENTATION(enum.IntEnum): + TOPLEFT = 1 + TOPRIGHT = 2 + BOTRIGHT = 3 + BOTLEFT = 4 + LEFTTOP = 5 + RIGHTTOP = 6 + RIGHTBOT = 7 + LEFTBOT = 8 + + return ORIENTATION + + def PLANARCONFIG(): + class PLANARCONFIG(enum.IntEnum): + CONTIG = 1 + SEPARATE = 2 + + return PLANARCONFIG + + def GRAYRESPONSEUNIT(): + class GRAYRESPONSEUNIT(enum.IntEnum): + _10S = 1 + _100S = 2 + _1000S = 3 + _10000S = 4 + _100000S = 5 + + return GRAYRESPONSEUNIT + + def GROUP4OPT(): + class GROUP4OPT(enum.IntEnum): + UNCOMPRESSED = 2 + + return GROUP4OPT + + def RESUNIT(): + class RESUNIT(enum.IntEnum): + NONE = 1 + INCH = 2 + CENTIMETER = 3 + # def __bool__(self): return self != 1 # Python 3.6 only + + return RESUNIT + + def COLORRESPONSEUNIT(): + class COLORRESPONSEUNIT(enum.IntEnum): + _10S = 1 + _100S = 2 + _1000S = 3 + _10000S = 4 + _100000S = 5 + + return COLORRESPONSEUNIT + + def PREDICTOR(): + class PREDICTOR(enum.IntEnum): + NONE = 1 + HORIZONTAL = 2 + FLOATINGPOINT = 3 + # def __bool__(self): return self != 1 # Python 3.6 only + + return PREDICTOR + + def EXTRASAMPLE(): + class EXTRASAMPLE(enum.IntEnum): + UNSPECIFIED = 0 + ASSOCALPHA = 1 + UNASSALPHA = 2 + + return EXTRASAMPLE + + def SAMPLEFORMAT(): + class SAMPLEFORMAT(enum.IntEnum): + UINT = 1 + INT = 2 + IEEEFP = 3 + VOID = 4 + COMPLEXINT = 5 + COMPLEXIEEEFP = 6 + + return SAMPLEFORMAT + + def DATATYPES(): + class 
DATATYPES(enum.IntEnum): + NOTYPE = 0 + BYTE = 1 + ASCII = 2 + SHORT = 3 + LONG = 4 + RATIONAL = 5 + SBYTE = 6 + UNDEFINED = 7 + SSHORT = 8 + SLONG = 9 + SRATIONAL = 10 + FLOAT = 11 + DOUBLE = 12 + IFD = 13 + UNICODE = 14 + COMPLEX = 15 + LONG8 = 16 + SLONG8 = 17 + IFD8 = 18 + + return DATATYPES + + def DATA_FORMATS(): + # Map TIFF DATATYPES to Python struct formats + return { + 1: "1B", # BYTE 8-bit unsigned integer. + 2: "1s", # ASCII 8-bit byte that contains a 7-bit ASCII code; + # the last byte must be NULL (binary zero). + 3: "1H", # SHORT 16-bit (2-byte) unsigned integer + 4: "1I", # LONG 32-bit (4-byte) unsigned integer. + 5: "2I", # RATIONAL Two LONGs: the first represents the numerator + # of a fraction; the second, the denominator. + 6: "1b", # SBYTE An 8-bit signed (twos-complement) integer. + 7: "1B", # UNDEFINED An 8-bit byte that may contain anything, + # depending on the definition of the field. + 8: "1h", # SSHORT A 16-bit (2-byte) signed (twos-complement) + # integer. + 9: "1i", # SLONG A 32-bit (4-byte) signed (twos-complement) + # integer. + 10: "2i", # SRATIONAL Two SLONGs: the first represents the + # numerator of a fraction, the second the denominator. + 11: "1f", # FLOAT Single precision (4-byte) IEEE format. + 12: "1d", # DOUBLE Double precision (8-byte) IEEE format. + 13: "1I", # IFD unsigned 4 byte IFD offset. 
+ # 14: '', # UNICODE + # 15: '', # COMPLEX + 16: "1Q", # LONG8 unsigned 8 byte integer (BigTiff) + 17: "1q", # SLONG8 signed 8 byte integer (BigTiff) + 18: "1Q", # IFD8 unsigned 8 byte IFD offset (BigTiff) + } + + def DATA_DTYPES(): + # Map numpy dtypes to TIFF DATATYPES + return { + "B": 1, + "s": 2, + "H": 3, + "I": 4, + "2I": 5, + "b": 6, + "h": 8, + "i": 9, + "2i": 10, + "f": 11, + "d": 12, + "Q": 16, + "q": 17, + } + + def SAMPLE_DTYPES(): + # Map TIFF SampleFormats and BitsPerSample to numpy dtype + return { + (1, 1): "?", # bitmap + (1, 2): "B", + (1, 3): "B", + (1, 4): "B", + (1, 5): "B", + (1, 6): "B", + (1, 7): "B", + (1, 8): "B", + (1, 9): "H", + (1, 10): "H", + (1, 11): "H", + (1, 12): "H", + (1, 13): "H", + (1, 14): "H", + (1, 15): "H", + (1, 16): "H", + (1, 17): "I", + (1, 18): "I", + (1, 19): "I", + (1, 20): "I", + (1, 21): "I", + (1, 22): "I", + (1, 23): "I", + (1, 24): "I", + (1, 25): "I", + (1, 26): "I", + (1, 27): "I", + (1, 28): "I", + (1, 29): "I", + (1, 30): "I", + (1, 31): "I", + (1, 32): "I", + (1, 64): "Q", + (2, 8): "b", + (2, 16): "h", + (2, 32): "i", + (2, 64): "q", + (3, 16): "e", + (3, 32): "f", + (3, 64): "d", + (6, 64): "F", + (6, 128): "D", + (1, (5, 6, 5)): "B", + } + + def COMPESSORS(): + # Map COMPRESSION to compress functions and default compression levels + + class Compressors(object): + """Delay import compressor functions.""" + + def __init__(self): + self._compressors = {8: (zlib.compress, 6), 32946: (zlib.compress, 6)} + + def __getitem__(self, key): + if key in self._compressors: + return self._compressors[key] + + if key == 34925: + try: + import lzma # delayed import + except ImportError: + try: + import backports.lzma as lzma # delayed import + except ImportError: + raise KeyError + + def lzma_compress(x, level): + return lzma.compress(x) + + self._compressors[key] = lzma_compress, 0 + return lzma_compress, 0 + + if key == 34926: + try: + import zstd # delayed import + except ImportError: + raise KeyError + 
self._compressors[key] = zstd.compress, 9 + return zstd.compress, 9 + + raise KeyError + + def __contains__(self, key): + try: + self[key] + return True + except KeyError: + return False + + return Compressors() + + def DECOMPESSORS(): + # Map COMPRESSION to decompress functions + + class Decompressors(object): + """Delay import decompressor functions.""" + + def __init__(self): + self._decompressors = { + None: identityfunc, + 1: identityfunc, + 5: decode_lzw, + 8: zlib.decompress, + 32773: decode_packbits, + 32946: zlib.decompress, + } + + def __getitem__(self, key): + if key in self._decompressors: + return self._decompressors[key] + + if key == 7: + try: + from imagecodecs import jpeg, jpeg_12 + except ImportError: + raise KeyError + + def decode_jpeg(x, table, bps, colorspace=None): + if bps == 8: + return jpeg.decode_jpeg(x, table, colorspace) + elif bps == 12: + return jpeg_12.decode_jpeg_12(x, table, colorspace) + else: + raise ValueError("bitspersample not supported") + + self._decompressors[key] = decode_jpeg + return decode_jpeg + + if key == 34925: + try: + import lzma # delayed import + except ImportError: + try: + import backports.lzma as lzma # delayed import + except ImportError: + raise KeyError + self._decompressors[key] = lzma.decompress + return lzma.decompress + + if key == 34926: + try: + import zstd # delayed import + except ImportError: + raise KeyError + self._decompressors[key] = zstd.decompress + return zstd.decompress + raise KeyError + + def __contains__(self, item): + try: + self[item] + return True + except KeyError: + return False + + return Decompressors() + + def FRAME_ATTRS(): + # Attributes that a TiffFrame shares with its keyframe + return set("shape ndim size dtype axes is_final".split()) + + def FILE_FLAGS(): + # TiffFile and TiffPage 'is_\*' attributes + exclude = set( + "reduced final memmappable contiguous tiled " "chroma_subsampled".split() + ) + return set( + a[3:] for a in dir(TiffPage) if a[:3] == "is_" and a[3:] not in 
def AXES_LABELS():
    """Return bidirectional mapping between axis characters and names.

    Both directions are present: e.g. 'X' -> 'width' and 'width' -> 'X'.
    """
    # TODO: is there a standard for character axes labels?
    char_to_name = {
        "X": "width",
        "Y": "height",
        "Z": "depth",
        "S": "sample",  # rgb(a)
        "I": "series",  # general sequence, plane, page, IFD
        "T": "time",
        "C": "channel",  # color, emission wavelength
        "A": "angle",
        "P": "phase",  # formerly F  # P is Position in LSM!
        "R": "tile",  # region, point, mosaic
        "H": "lifetime",  # histogram
        "E": "lambda",  # excitation wavelength
        "L": "exposure",  # lux
        "V": "event",
        "Q": "other",
        "M": "mosaic",  # LSM 6
    }
    labels = dict(char_to_name)
    # Add the reverse direction so lookups work by name as well.
    labels.update((name, char) for char, name in char_to_name.items())
    return labels
def GEO_KEYS():
    """Map GeoTIFF GeoKey IDs to GeoKey names (GeoTIFF 1.0 spec, section 6.2)."""
    return {
        1024: "GTModelTypeGeoKey",
        1025: "GTRasterTypeGeoKey",
        1026: "GTCitationGeoKey",
        2048: "GeographicTypeGeoKey",
        2049: "GeogCitationGeoKey",
        2050: "GeogGeodeticDatumGeoKey",
        2051: "GeogPrimeMeridianGeoKey",
        2052: "GeogLinearUnitsGeoKey",
        2053: "GeogLinearUnitSizeGeoKey",
        2054: "GeogAngularUnitsGeoKey",
        2055: "GeogAngularUnitsSizeGeoKey",
        2056: "GeogEllipsoidGeoKey",
        2057: "GeogSemiMajorAxisGeoKey",
        2058: "GeogSemiMinorAxisGeoKey",
        2059: "GeogInvFlatteningGeoKey",
        2060: "GeogAzimuthUnitsGeoKey",
        2061: "GeogPrimeMeridianLongGeoKey",
        2062: "GeogTOWGS84GeoKey",
        3059: "ProjLinearUnitsInterpCorrectGeoKey",  # GDAL
        3072: "ProjectedCSTypeGeoKey",
        3073: "PCSCitationGeoKey",
        3074: "ProjectionGeoKey",
        3075: "ProjCoordTransGeoKey",
        3076: "ProjLinearUnitsGeoKey",
        3077: "ProjLinearUnitSizeGeoKey",
        3078: "ProjStdParallel1GeoKey",
        3079: "ProjStdParallel2GeoKey",
        3080: "ProjNatOriginLongGeoKey",
        3081: "ProjNatOriginLatGeoKey",
        3082: "ProjFalseEastingGeoKey",
        3083: "ProjFalseNorthingGeoKey",
        3084: "ProjFalseOriginLongGeoKey",
        3085: "ProjFalseOriginLatGeoKey",
        3086: "ProjFalseOriginEastingGeoKey",
        3087: "ProjFalseOriginNorthingGeoKey",
        3088: "ProjCenterLongGeoKey",
        3089: "ProjCenterLatGeoKey",
        3090: "ProjCenterEastingGeoKey",
        # Fixed: key 3091 was wrongly mapped to 'ProjFalseOriginNorthingGeoKey'
        # (a duplicate of key 3087); per the GeoTIFF specification, 3091 is
        # ProjCenterNorthingGeoKey, paired with ProjCenterEastingGeoKey (3090).
        3091: "ProjCenterNorthingGeoKey",
        3092: "ProjScaleAtNatOriginGeoKey",
        3093: "ProjScaleAtCenterGeoKey",
        3094: "ProjAzimuthAngleGeoKey",
        3095: "ProjStraightVertPoleLongGeoKey",
        3096: "ProjRectifiedGridAngleGeoKey",
        4096: "VerticalCSTypeGeoKey",
        4097: "VerticalCitationGeoKey",
        4098: "VerticalDatumGeoKey",
        4099: "VerticalUnitsGeoKey",
    }
def CZ_LSMINFO_READERS():
    """Map CZ_LSMINFO sub-record names to reader functions.

    A value of None marks a sub-record that is currently not parsed.
    """
    # TODO: read more CZ_LSMINFO sub-records
    readers = {
        "ScanInformation": read_lsm_scaninfo,
        "TimeStamps": read_lsm_timestamps,
        "EventList": read_lsm_eventlist,
        "ChannelColors": read_lsm_channelcolors,
        "Positions": read_lsm_floatpairs,
        "TilePositions": read_lsm_floatpairs,
    }
    # Known sub-records without a reader implementation.
    for name in (
        "VectorOverlay",
        "InputLut",
        "OutputLut",
        "TimeIntervall",
        "ChannelDataTypes",
        "KsData",
        "Roi",
        "BleachRoi",
        "NextRecording",
        "MeanOfRoisOverlay",
        "TopoIsolineOverlay",
        "TopoProfileOverlay",
        "ChannelWavelength",
        "SphereCorrection",
        "ChannelFactors",
        "UnmixParameters",
        "AcquisitionParameters",
        "Characteristics",
    ):
        readers[name] = None
    return readers
def CZ_LSMINFO_DIMENSIONS():
    """Map axis character to the CZ_LSMINFO attribute holding its length."""
    return dict(
        X="DimensionX",
        Y="DimensionY",
        Z="DimensionZ",
        C="DimensionChannels",
        T="DimensionTime",
        P="DimensionP",
        M="DimensionM",
    )
0x1000000E: "ImagesHeight", + 0x1000000F: "ImagesNumberPlanes", + 0x10000010: "ImagesNumberStacks", + 0x10000011: "ImagesNumberChannels", + 0x10000012: "LinscanXySize", + 0x10000013: "ScanDirection", + 0x10000014: "TimeSeries", + 0x10000015: "OriginalScanData", + 0x10000016: "ZoomX", + 0x10000017: "ZoomY", + 0x10000018: "ZoomZ", + 0x10000019: "Sample0X", + 0x1000001A: "Sample0Y", + 0x1000001B: "Sample0Z", + 0x1000001C: "SampleSpacing", + 0x1000001D: "LineSpacing", + 0x1000001E: "PlaneSpacing", + 0x1000001F: "PlaneWidth", + 0x10000020: "PlaneHeight", + 0x10000021: "VolumeDepth", + 0x10000023: "Nutation", + 0x10000034: "Rotation", + 0x10000035: "Precession", + 0x10000036: "Sample0time", + 0x10000037: "StartScanTriggerIn", + 0x10000038: "StartScanTriggerOut", + 0x10000039: "StartScanEvent", + 0x10000040: "StartScanTime", + 0x10000041: "StopScanTriggerIn", + 0x10000042: "StopScanTriggerOut", + 0x10000043: "StopScanEvent", + 0x10000044: "StopScanTime", + 0x10000045: "UseRois", + 0x10000046: "UseReducedMemoryRois", + 0x10000047: "User", + 0x10000048: "UseBcCorrection", + 0x10000049: "PositionBcCorrection1", + 0x10000050: "PositionBcCorrection2", + 0x10000051: "InterpolationY", + 0x10000052: "CameraBinning", + 0x10000053: "CameraSupersampling", + 0x10000054: "CameraFrameWidth", + 0x10000055: "CameraFrameHeight", + 0x10000056: "CameraOffsetX", + 0x10000057: "CameraOffsetY", + 0x10000059: "RtBinning", + 0x1000005A: "RtFrameWidth", + 0x1000005B: "RtFrameHeight", + 0x1000005C: "RtRegionWidth", + 0x1000005D: "RtRegionHeight", + 0x1000005E: "RtOffsetX", + 0x1000005F: "RtOffsetY", + 0x10000060: "RtZoom", + 0x10000061: "RtLinePeriod", + 0x10000062: "Prescan", + 0x10000063: "ScanDirectionZ", + # Track + 0x40000001: "MultiplexType", # 0 After Line; 1 After Frame + 0x40000002: "MultiplexOrder", + 0x40000003: "SamplingMode", # 0 Sample; 1 Line Avg; 2 Frame Avg + 0x40000004: "SamplingMethod", # 1 Mean; 2 Sum + 0x40000005: "SamplingNumber", + 0x40000006: "Acquire", + 0x40000007: 
"SampleObservationTime", + 0x4000000B: "TimeBetweenStacks", + 0x4000000C: "Name", + 0x4000000D: "Collimator1Name", + 0x4000000E: "Collimator1Position", + 0x4000000F: "Collimator2Name", + 0x40000010: "Collimator2Position", + 0x40000011: "IsBleachTrack", + 0x40000012: "IsBleachAfterScanNumber", + 0x40000013: "BleachScanNumber", + 0x40000014: "TriggerIn", + 0x40000015: "TriggerOut", + 0x40000016: "IsRatioTrack", + 0x40000017: "BleachCount", + 0x40000018: "SpiCenterWavelength", + 0x40000019: "PixelTime", + 0x40000021: "CondensorFrontlens", + 0x40000023: "FieldStopValue", + 0x40000024: "IdCondensorAperture", + 0x40000025: "CondensorAperture", + 0x40000026: "IdCondensorRevolver", + 0x40000027: "CondensorFilter", + 0x40000028: "IdTransmissionFilter1", + 0x40000029: "IdTransmission1", + 0x40000030: "IdTransmissionFilter2", + 0x40000031: "IdTransmission2", + 0x40000032: "RepeatBleach", + 0x40000033: "EnableSpotBleachPos", + 0x40000034: "SpotBleachPosx", + 0x40000035: "SpotBleachPosy", + 0x40000036: "SpotBleachPosz", + 0x40000037: "IdTubelens", + 0x40000038: "IdTubelensPosition", + 0x40000039: "TransmittedLight", + 0x4000003A: "ReflectedLight", + 0x4000003B: "SimultanGrabAndBleach", + 0x4000003C: "BleachPixelTime", + # Laser + 0x50000001: "Name", + 0x50000002: "Acquire", + 0x50000003: "Power", + # DetectionChannel + 0x70000001: "IntegrationMode", + 0x70000002: "SpecialMode", + 0x70000003: "DetectorGainFirst", + 0x70000004: "DetectorGainLast", + 0x70000005: "AmplifierGainFirst", + 0x70000006: "AmplifierGainLast", + 0x70000007: "AmplifierOffsFirst", + 0x70000008: "AmplifierOffsLast", + 0x70000009: "PinholeDiameter", + 0x7000000A: "CountingTrigger", + 0x7000000B: "Acquire", + 0x7000000C: "PointDetectorName", + 0x7000000D: "AmplifierName", + 0x7000000E: "PinholeName", + 0x7000000F: "FilterSetName", + 0x70000010: "FilterName", + 0x70000013: "IntegratorName", + 0x70000014: "ChannelName", + 0x70000015: "DetectorGainBc1", + 0x70000016: "DetectorGainBc2", + 0x70000017: 
"AmplifierGainBc1", + 0x70000018: "AmplifierGainBc2", + 0x70000019: "AmplifierOffsetBc1", + 0x70000020: "AmplifierOffsetBc2", + 0x70000021: "SpectralScanChannels", + 0x70000022: "SpiWavelengthStart", + 0x70000023: "SpiWavelengthStop", + 0x70000026: "DyeName", + 0x70000027: "DyeFolder", + # IlluminationChannel + 0x90000001: "Name", + 0x90000002: "Power", + 0x90000003: "Wavelength", + 0x90000004: "Aquire", + 0x90000005: "DetchannelName", + 0x90000006: "PowerBc1", + 0x90000007: "PowerBc2", + # BeamSplitter + 0xB0000001: "FilterSet", + 0xB0000002: "Filter", + 0xB0000003: "Name", + # DataChannel + 0xD0000001: "Name", + 0xD0000003: "Acquire", + 0xD0000004: "Color", + 0xD0000005: "SampleType", + 0xD0000006: "BitsPerSample", + 0xD0000007: "RatioType", + 0xD0000008: "RatioTrack1", + 0xD0000009: "RatioTrack2", + 0xD000000A: "RatioChannel1", + 0xD000000B: "RatioChannel2", + 0xD000000C: "RatioConst1", + 0xD000000D: "RatioConst2", + 0xD000000E: "RatioConst3", + 0xD000000F: "RatioConst4", + 0xD0000010: "RatioConst5", + 0xD0000011: "RatioConst6", + 0xD0000012: "RatioFirstImages1", + 0xD0000013: "RatioFirstImages2", + 0xD0000014: "DyeName", + 0xD0000015: "DyeFolder", + 0xD0000016: "Spectrum", + 0xD0000017: "Acquire", + # Timer + 0x12000001: "Name", + 0x12000002: "Description", + 0x12000003: "Interval", + 0x12000004: "TriggerIn", + 0x12000005: "TriggerOut", + 0x12000006: "ActivationTime", + 0x12000007: "ActivationNumber", + # Marker + 0x14000001: "Name", + 0x14000002: "Description", + 0x14000003: "TriggerIn", + 0x14000004: "TriggerOut", + } + + def NIH_IMAGE_HEADER(): + return [ + ("FileID", "a8"), + ("nLines", "i2"), + ("PixelsPerLine", "i2"), + ("Version", "i2"), + ("OldLutMode", "i2"), + ("OldnColors", "i2"), + ("Colors", "u1", (3, 32)), + ("OldColorStart", "i2"), + ("ColorWidth", "i2"), + ("ExtraColors", "u2", (6, 3)), + ("nExtraColors", "i2"), + ("ForegroundIndex", "i2"), + ("BackgroundIndex", "i2"), + ("XScale", "f8"), + ("Unused2", "i2"), + ("Unused3", "i2"), + ("UnitsID", 
def NIH_UNITS_TYPE():
    """Return NIH Image measurement unit names, indexed by UnitsID."""
    return tuple(
        "Nanometers Micrometers Millimeters Centimeters Meters "
        "Kilometers Inches Feet Miles Pixels OtherUnits".split()
    )
("Defocus", "i4"), + ("Astigmatism", "i4"), + ("AstigmatismDirection", "i4"), + ("BiprismVoltage", "i4"), + ("SpecimenTiltAngle", "i4"), + ("SpecimenTiltDirection", "i4"), + ("IlluminationTiltDirection", "i4"), + ("IlluminationTiltAngle", "i4"), + ("ImageMode", "i4"), + ("EnergySpread", "i4"), + ("ChromaticAberration", "i4"), + ("ShutterType", "i4"), + ("DefocusSpread", "i4"), + ("CcdNumber", "i4"), + ("CcdSize", "i4"), + ("OffsetXV1", "i4"), + ("OffsetYV1", "i4"), + ("PhysicalPixelSize", "i4"), + ("Binning", "i4"), + ("ReadoutSpeed", "i4"), + ("GainV1", "i4"), + ("SensitivityV1", "i4"), + ("ExposureTimeV1", "i4"), + ("FlatCorrected", "i4"), + ("DeadPxCorrected", "i4"), + ("ImageMean", "i4"), + ("ImageStd", "i4"), + ("DisplacementX", "i4"), + ("DisplacementY", "i4"), + ("DateV1", "i4"), + ("TimeV1", "i4"), + ("ImageMin", "i4"), + ("ImageMax", "i4"), + ("ImageStatisticsQuality", "i4"), + ] + + def TVIPS_HEADER_V2(): + return [ + ("ImageName", "V160"), # utf16 + ("ImageFolder", "V160"), + ("ImageSizeX", "i4"), + ("ImageSizeY", "i4"), + ("ImageSizeZ", "i4"), + ("ImageSizeE", "i4"), + ("ImageDataType", "i4"), + ("Date", "i4"), + ("Time", "i4"), + ("Comment", "V1024"), + ("ImageHistory", "V1024"), + ("Scaling", "16f4"), + ("ImageStatistics", "16c16"), + ("ImageType", "i4"), + ("ImageDisplaType", "i4"), + ("PixelSizeX", "f4"), # distance between two px in x, [nm] + ("PixelSizeY", "f4"), # distance between two px in y, [nm] + ("ImageDistanceZ", "f4"), + ("ImageDistanceE", "f4"), + ("ImageMisc", "32f4"), + ("TemType", "V160"), + ("TemHighTension", "f4"), + ("TemAberrations", "32f4"), + ("TemEnergy", "32f4"), + ("TemMode", "i4"), + ("TemMagnification", "f4"), + ("TemMagnificationCorrection", "f4"), + ("PostMagnification", "f4"), + ("TemStageType", "i4"), + ("TemStagePosition", "5f4"), # x, y, z, a, b + ("TemImageShift", "2f4"), + ("TemBeamShift", "2f4"), + ("TemBeamTilt", "2f4"), + ("TilingParameters", "7f4"), # 0: tiling? 
def MM_DIMENSIONS():
    """Map FluoView MM_Header.Dimensions names to single axes characters."""
    name_axis_pairs = (
        ("X", "X"),
        ("Y", "Y"),
        ("Z", "Z"),
        ("T", "T"),
        ("CH", "C"),
        ("WAVELENGTH", "C"),
        ("TIME", "T"),
        ("XY", "R"),
        ("EVENT", "V"),
        ("EXPOSURE", "L"),
    )
    return dict(name_axis_pairs)
("ThreshState", int), + ("ThreshStateRed", int), + ("tagid_10", None), # undefined + ("ThreshStateGreen", int), + ("ThreshStateBlue", int), + ("ThreshStateLo", int), + ("ThreshStateHi", int), + ("Zoom", int), + ("CreateTime", julian_datetime), + ("LastSavedTime", julian_datetime), + ("currentBuffer", int), + ("grayFit", None), + ("grayPointCount", None), + ("grayX", Fraction), + ("grayY", Fraction), + ("grayMin", Fraction), + ("grayMax", Fraction), + ("grayUnitName", str), + ("StandardLUT", int), + ("wavelength", int), + ("StagePosition", "(%i,2,2)u4"), # N xy positions as fract + ("CameraChipOffset", "(%i,2,2)u4"), # N xy offsets as fract + ("OverlayMask", None), + ("OverlayCompress", None), + ("Overlay", None), + ("SpecialOverlayMask", None), + ("SpecialOverlayCompress", None), + ("SpecialOverlay", None), + ("ImageProperty", read_uic_image_property), + ("StageLabel", "%ip"), # N str + ("AutoScaleLoInfo", Fraction), + ("AutoScaleHiInfo", Fraction), + ("AbsoluteZ", "(%i,2)u4"), # N fractions + ("AbsoluteZValid", "(%i,)u4"), # N long + ("Gamma", "I"), # 'I' uses offset + ("GammaRed", "I"), + ("GammaGreen", "I"), + ("GammaBlue", "I"), + ("CameraBin", "2I"), + ("NewLUT", int), + ("ImagePropertyEx", None), + ("PlaneProperty", int), + ("UserLutTable", "(256,3)u1"), + ("RedAutoScaleInfo", int), + ("RedAutoScaleLoInfo", Fraction), + ("RedAutoScaleHiInfo", Fraction), + ("RedMinScaleInfo", int), + ("RedMaxScaleInfo", int), + ("GreenAutoScaleInfo", int), + ("GreenAutoScaleLoInfo", Fraction), + ("GreenAutoScaleHiInfo", Fraction), + ("GreenMinScaleInfo", int), + ("GreenMaxScaleInfo", int), + ("BlueAutoScaleInfo", int), + ("BlueAutoScaleLoInfo", Fraction), + ("BlueAutoScaleHiInfo", Fraction), + ("BlueMinScaleInfo", int), + ("BlueMaxScaleInfo", int), + # ('OverlayPlaneColor', read_uic_overlay_plane_color), + ] + + def PILATUS_HEADER(): + # PILATUS CBF Header Specification, Version 1.4 + # Map key to [value_indices], type + return { + "Detector": ([slice(1, None)], str), + 
"Pixel_size": ([1, 4], float), + "Silicon": ([3], float), + "Exposure_time": ([1], float), + "Exposure_period": ([1], float), + "Tau": ([1], float), + "Count_cutoff": ([1], int), + "Threshold_setting": ([1], float), + "Gain_setting": ([1, 2], str), + "N_excluded_pixels": ([1], int), + "Excluded_pixels": ([1], str), + "Flat_field": ([1], str), + "Trim_file": ([1], str), + "Image_path": ([1], str), + # optional + "Wavelength": ([1], float), + "Energy_range": ([1, 2], float), + "Detector_distance": ([1], float), + "Detector_Voffset": ([1], float), + "Beam_xy": ([1, 2], float), + "Flux": ([1], str), + "Filter_transmission": ([1], float), + "Start_angle": ([1], float), + "Angle_increment": ([1], float), + "Detector_2theta": ([1], float), + "Polarization": ([1], float), + "Alpha": ([1], float), + "Kappa": ([1], float), + "Phi": ([1], float), + "Phi_increment": ([1], float), + "Chi": ([1], float), + "Chi_increment": ([1], float), + "Oscillation_axis": ([slice(1, None)], str), + "N_oscillations": ([1], int), + "Start_position": ([1], float), + "Position_increment": ([1], float), + "Shutter_time": ([1], float), + "Omega": ([1], float), + "Omega_increment": ([1], float), + } + + def REVERSE_BITORDER_BYTES(): + # Bytes with reversed bitorder + return ( + b"\x00\x80@\xc0 \xa0`\xe0\x10\x90P\xd00\xb0p\xf0\x08\x88H\xc8(" + b"\xa8h\xe8\x18\x98X\xd88\xb8x\xf8\x04\x84D\xc4$\xa4d\xe4\x14" + b"\x94T\xd44\xb4t\xf4\x0c\x8cL\xcc,\xacl\xec\x1c\x9c\\\xdc<\xbc|" + b'\xfc\x02\x82B\xc2"\xa2b\xe2\x12\x92R\xd22\xb2r\xf2\n\x8aJ\xca*' + b"\xaaj\xea\x1a\x9aZ\xda:\xbaz\xfa\x06\x86F\xc6&\xa6f\xe6\x16" + b"\x96V\xd66\xb6v\xf6\x0e\x8eN\xce.\xaen\xee\x1e\x9e^\xde>\xbe~" + b"\xfe\x01\x81A\xc1!\xa1a\xe1\x11\x91Q\xd11\xb1q\xf1\t\x89I\xc9)" + b"\xa9i\xe9\x19\x99Y\xd99\xb9y\xf9\x05\x85E\xc5%\xa5e\xe5\x15" + b"\x95U\xd55\xb5u\xf5\r\x8dM\xcd-\xadm\xed\x1d\x9d]\xdd=\xbd}" + b"\xfd\x03\x83C\xc3#\xa3c\xe3\x13\x93S\xd33\xb3s\xf3\x0b\x8bK" + b"\xcb+\xabk\xeb\x1b\x9b[\xdb;\xbb{\xfb\x07\x87G\xc7'\xa7g\xe7" + 
def ALLOCATIONGRANULARITY():
    """Return mmap allocation granularity (alignment for contiguous TIFF data)."""
    import mmap  # delayed import

    granularity = mmap.ALLOCATIONGRANULARITY
    return granularity
offset = unpack(offsetformat, value)[0] + if offset < 8 or offset > fh.size - size: + raise TiffTag.Error("invalid tag value offset %i" % offset) + fh.seek(offset) + if code in customtags: + readfunc = customtags[code][1] + value = readfunc(fh, byteorder, dtype, count, offsetsize) + elif type_ == 7 or (count > 1 and dtype[-1] == "B"): + value = read_bytes(fh, byteorder, dtype, count, offsetsize) + elif code in tagnames or dtype[-1] == "s": + value = unpack(fmt, fh.read(size)) + else: + value = read_numpy(fh, byteorder, dtype, count, offsetsize) + elif dtype[-1] == "B" or type_ == 7: + value = value[:size] + else: + value = unpack(fmt, value[:size]) + + if code not in customtags and code not in TIFF.TAG_TUPLE: + if len(value) == 1: + value = value[0] + if type_ != 7 and dtype[-1] == "s" and isinstance(value, bytes): + # TIFF ASCII fields can contain multiple strings, + # each terminated with a NUL + try: + value = bytes2str(stripascii(value).strip()) + except UnicodeDecodeError: + warnings.warn("tag %i: coercing invalid ASCII to bytes" % code) + + tags[name] = value + + result.append(tags) + # read offset to next page + fh.seek(pos) + offset = unpack(offsetformat, fh.read(offsetsize))[0] + if offset == 0: + break + if offset >= fh.size: + warnings.warn("invalid page offset %i" % offset) + break + fh.seek(offset) + + if result and maxifds == 1: + result = result[0] + return result + + +def read_exif_ifd(fh, byteorder, dtype, count, offsetsize): + """Read EXIF tags from file and return as dict.""" + exif = read_tags(fh, byteorder, offsetsize, TIFF.EXIF_TAGS, maxifds=1) + for name in ("ExifVersion", "FlashpixVersion"): + try: + exif[name] = bytes2str(exif[name]) + except Exception: + pass + if "UserComment" in exif: + idcode = exif["UserComment"][:8] + try: + if idcode == b"ASCII\x00\x00\x00": + exif["UserComment"] = bytes2str(exif["UserComment"][8:]) + elif idcode == b"UNICODE\x00": + exif["UserComment"] = exif["UserComment"][8:].decode("utf-16") + except Exception: + 
def read_bytes(fh, byteorder, dtype, count, offsetsize):
    """Read tag data from file and return as byte string.

    `count` values of the tag's data type are read; string types ('s')
    are read as raw single bytes.
    """
    if dtype[-1] == "s":
        numpy_dtype = "B"
    else:
        numpy_dtype = byteorder + dtype[-1]
    nbytes = count * numpy.dtype(numpy_dtype).itemsize
    data = fh.read(nbytes)
    if len(data) != nbytes:
        # Short read: return what was read, but alert the caller.
        warnings.warn("failed to read all bytes: %i, %i" % (len(data), nbytes))
    return data
def read_mm_stamp(fh, byteorder, dtype, count, offsetsize):
    """Read FluoView mm_stamp tag from file and return as numpy.ndarray.

    The stamp is always eight float64 values in the file's byte order.
    """
    stamp_dtype = byteorder + "f8"
    return fh.read_array(stamp_dtype, 8)
+ + """ + fh.seek(0) + try: + byteorder = {b"II": "<", b"MM": ">"}[fh.read(2)] + except IndexError: + raise ValueError("not a MicroManager TIFF file") + + result = {} + fh.seek(8) + ( + index_header, + index_offset, + display_header, + display_offset, + comments_header, + comments_offset, + summary_header, + summary_length, + ) = struct.unpack(byteorder + "IIIIIIII", fh.read(32)) + + if summary_header != 2355492: + raise ValueError("invalid MicroManager summary header") + result["Summary"] = read_json(fh, byteorder, None, summary_length, None) + + if index_header != 54773648: + raise ValueError("invalid MicroManager index header") + fh.seek(index_offset) + header, count = struct.unpack(byteorder + "II", fh.read(8)) + if header != 3453623: + raise ValueError("invalid MicroManager index header") + data = struct.unpack(byteorder + "IIIII" * count, fh.read(20 * count)) + result["IndexMap"] = { + "Channel": data[::5], + "Slice": data[1::5], + "Frame": data[2::5], + "Position": data[3::5], + "Offset": data[4::5], + } + + if display_header != 483765892: + raise ValueError("invalid MicroManager display header") + fh.seek(display_offset) + header, count = struct.unpack(byteorder + "II", fh.read(8)) + if header != 347834724: + raise ValueError("invalid MicroManager display header") + result["DisplaySettings"] = read_json(fh, byteorder, None, count, None) + + if comments_header != 99384722: + raise ValueError("invalid MicroManager comments header") + fh.seek(comments_offset) + header, count = struct.unpack(byteorder + "II", fh.read(8)) + if header != 84720485: + raise ValueError("invalid MicroManager comments header") + result["Comments"] = read_json(fh, byteorder, None, count, None) + + return result + + +def read_metaseries_catalog(fh): + """Read MetaSeries non-TIFF hint catalog from file. + + Raise ValueError if the file does not contain a valid hint catalog. 
+ + """ + # TODO: implement read_metaseries_catalog + raise NotImplementedError() + + +def imagej_metadata_tags(metadata, byteorder): + """Return IJMetadata and IJMetadataByteCounts tags from metadata dict. + + The tags can be passed to the TiffWriter.save function as extratags. + + The metadata dict may contain the following keys and values: + + Info : str + Human-readable information as string. + Labels : sequence of str + Human-readable labels for each channel. + Ranges : sequence of doubles + Lower and upper values for each channel. + LUTs : sequence of (3, 256) uint8 ndarrays + Color palettes for each channel. + Plot : bytes + Undocumented ImageJ internal format. + ROI: bytes + Undocumented ImageJ internal region of interest format. + Overlays : bytes + Undocumented ImageJ internal format. + + """ + header = [{">": b"IJIJ", "<": b"JIJI"}[byteorder]] + bytecounts = [0] + body = [] + + def _string(data, byteorder): + return data.encode("utf-16" + {">": "be", "<": "le"}[byteorder]) + + def _doubles(data, byteorder): + return struct.pack(byteorder + ("d" * len(data)), *data) + + def _ndarray(data, byteorder): + return data.tobytes() + + def _bytes(data, byteorder): + return data + + metadata_types = ( + ("Info", b"info", 1, _string), + ("Labels", b"labl", None, _string), + ("Ranges", b"rang", 1, _doubles), + ("LUTs", b"luts", None, _ndarray), + ("Plot", b"plot", 1, _bytes), + ("ROI", b"roi ", 1, _bytes), + ("Overlays", b"over", None, _bytes), + ) + + for key, mtype, count, func in metadata_types: + if key.lower() in metadata: + key = key.lower() + elif key not in metadata: + continue + if byteorder == "<": + mtype = mtype[::-1] + values = metadata[key] + if count is None: + count = len(values) + else: + values = [values] + header.append(mtype + struct.pack(byteorder + "I", count)) + for value in values: + data = func(value, byteorder) + body.append(data) + bytecounts.append(len(data)) + + if not body: + return () + body = b"".join(body) + header = b"".join(header) 
+ data = header + body + bytecounts[0] = len(header) + bytecounts = struct.pack(byteorder + ("I" * len(bytecounts)), *bytecounts) + return ( + (50839, "B", len(data), data, True), + (50838, "I", len(bytecounts) // 4, bytecounts, True), + ) + + +def imagej_metadata(data, bytecounts, byteorder): + """Return IJMetadata tag value as dict. + + The 'Info' string can have multiple formats, e.g. OIF or ScanImage, + that might be parsed into dicts using the matlabstr2py or + oiffile.SettingsFile functions. + + """ + + def _string(data, byteorder): + return data.decode("utf-16" + {">": "be", "<": "le"}[byteorder]) + + def _doubles(data, byteorder): + return struct.unpack(byteorder + ("d" * (len(data) // 8)), data) + + def _lut(data, byteorder): + return numpy.frombuffer(data, "uint8").reshape(-1, 256) + + def _bytes(data, byteorder): + return data + + metadata_types = { # big-endian + b"info": ("Info", _string), + b"labl": ("Labels", _string), + b"rang": ("Ranges", _doubles), + b"luts": ("LUTs", _lut), + b"plot": ("Plots", _bytes), + b"roi ": ("ROI", _bytes), + b"over": ("Overlays", _bytes), + } + metadata_types.update( # little-endian + dict((k[::-1], v) for k, v in metadata_types.items()) + ) + + if not bytecounts: + raise ValueError("no ImageJ metadata") + + if data[:4] not in (b"IJIJ", b"JIJI"): + raise ValueError("invalid ImageJ metadata") + + header_size = bytecounts[0] + if header_size < 12 or header_size > 804: + raise ValueError("invalid ImageJ metadata header size") + + ntypes = (header_size - 4) // 8 + header = struct.unpack(byteorder + "4sI" * ntypes, data[4 : 4 + ntypes * 8]) + pos = 4 + ntypes * 8 + counter = 0 + result = {} + for mtype, count in zip(header[::2], header[1::2]): + values = [] + name, func = metadata_types.get(mtype, (bytes2str(mtype), read_bytes)) + for _ in range(count): + counter += 1 + pos1 = pos + bytecounts[counter] + values.append(func(data[pos:pos1], byteorder)) + pos = pos1 + result[name.strip()] = values[0] if count == 1 else values + 
return result + + +def imagej_description_metadata(description): + """Return metatata from ImageJ image description as dict. + + Raise ValueError if not a valid ImageJ description. + + >>> description = 'ImageJ=1.11a\\nimages=510\\nhyperstack=true\\n' + >>> imagej_description_metadata(description) # doctest: +SKIP + {'ImageJ': '1.11a', 'images': 510, 'hyperstack': True} + + """ + + def _bool(val): + return {"true": True, "false": False}[val.lower()] + + result = {} + for line in description.splitlines(): + try: + key, val = line.split("=") + except Exception: + continue + key = key.strip() + val = val.strip() + for dtype in (int, float, _bool): + try: + val = dtype(val) + break + except Exception: + pass + result[key] = val + + if "ImageJ" not in result: + raise ValueError("not a ImageJ image description") + return result + + +def imagej_description( + shape, + rgb=None, + colormaped=False, + version="1.11a", + hyperstack=None, + mode=None, + loop=None, + **kwargs +): + """Return ImageJ image description from data shape. + + ImageJ can handle up to 6 dimensions in order TZCYXS. 
+ + >>> imagej_description((51, 5, 2, 196, 171)) # doctest: +SKIP + ImageJ=1.11a + images=510 + channels=2 + slices=5 + frames=51 + hyperstack=true + mode=grayscale + loop=false + + """ + if colormaped: + raise NotImplementedError("ImageJ colormapping not supported") + shape = imagej_shape(shape, rgb=rgb) + rgb = shape[-1] in (3, 4) + + result = ["ImageJ=%s" % version] + append = [] + result.append("images=%i" % product(shape[:-3])) + if hyperstack is None: + hyperstack = True + append.append("hyperstack=true") + else: + append.append("hyperstack=%s" % bool(hyperstack)) + if shape[2] > 1: + result.append("channels=%i" % shape[2]) + if mode is None and not rgb: + mode = "grayscale" + if hyperstack and mode: + append.append("mode=%s" % mode) + if shape[1] > 1: + result.append("slices=%i" % shape[1]) + if shape[0] > 1: + result.append("frames=%i" % shape[0]) + if loop is None: + append.append("loop=false") + if loop is not None: + append.append("loop=%s" % bool(loop)) + for key, value in kwargs.items(): + append.append("%s=%s" % (key.lower(), value)) + + return "\n".join(result + append + [""]) + + +def imagej_shape(shape, rgb=None): + """Return shape normalized to 6D ImageJ hyperstack TZCYXS. + + Raise ValueError if not a valid ImageJ hyperstack shape. + + >>> imagej_shape((2, 3, 4, 5, 3), False) + (2, 3, 4, 5, 3, 1) + + """ + shape = tuple(int(i) for i in shape) + ndim = len(shape) + if 1 > ndim > 6: + raise ValueError("invalid ImageJ hyperstack: not 2 to 6 dimensional") + if rgb is None: + rgb = shape[-1] in (3, 4) and ndim > 2 + if rgb and shape[-1] not in (3, 4): + raise ValueError("invalid ImageJ hyperstack: not a RGB image") + if not rgb and ndim == 6 and shape[-1] != 1: + raise ValueError("invalid ImageJ hyperstack: not a non-RGB image") + if rgb or shape[-1] == 1: + return (1,) * (6 - ndim) + shape + return (1,) * (5 - ndim) + shape + (1,) + + +def json_description(shape, **metadata): + """Return JSON image description from data shape and other meta data. 
+ + Return UTF-8 encoded JSON. + + >>> json_description((256, 256, 3), axes='YXS') # doctest: +SKIP + b'{"shape": [256, 256, 3], "axes": "YXS"}' + + """ + metadata.update(shape=shape) + return json.dumps(metadata) # .encode('utf-8') + + +def json_description_metadata(description): + """Return metatata from JSON formatted image description as dict. + + Raise ValuError if description is of unknown format. + + >>> description = '{"shape": [256, 256, 3], "axes": "YXS"}' + >>> json_description_metadata(description) # doctest: +SKIP + {'shape': [256, 256, 3], 'axes': 'YXS'} + >>> json_description_metadata('shape=(256, 256, 3)') + {'shape': (256, 256, 3)} + + """ + if description[:6] == "shape=": + # old style 'shaped' description; not JSON + shape = tuple(int(i) for i in description[7:-1].split(",")) + return dict(shape=shape) + if description[:1] == "{" and description[-1:] == "}": + # JSON description + return json.loads(description) + raise ValueError("invalid JSON image description", description) + + +def fluoview_description_metadata(description, ignoresections=None): + """Return metatata from FluoView image description as dict. + + The FluoView image description format is unspecified. Expect failures. + + >>> descr = ('[Intensity Mapping]\\nMap Ch0: Range=00000 to 02047\\n' + ... 
'[Intensity Mapping End]') + >>> fluoview_description_metadata(descr) + {'Intensity Mapping': {'Map Ch0: Range': '00000 to 02047'}} + + """ + if not description.startswith("["): + raise ValueError("invalid FluoView image description") + if ignoresections is None: + ignoresections = {"Region Info (Fields)", "Protocol Description"} + + result = {} + sections = [result] + comment = False + for line in description.splitlines(): + if not comment: + line = line.strip() + if not line: + continue + if line[0] == "[": + if line[-5:] == " End]": + # close section + del sections[-1] + section = sections[-1] + name = line[1:-5] + if comment: + section[name] = "\n".join(section[name]) + if name[:4] == "LUT ": + a = numpy.array(section[name], dtype="uint8") + a.shape = -1, 3 + section[name] = a + continue + # new section + comment = False + name = line[1:-1] + if name[:4] == "LUT ": + section = [] + elif name in ignoresections: + section = [] + comment = True + else: + section = {} + sections.append(section) + result[name] = section + continue + # add entry + if comment: + section.append(line) + continue + line = line.split("=", 1) + if len(line) == 1: + section[line[0].strip()] = None + continue + key, value = line + if key[:4] == "RGB ": + section.extend(int(rgb) for rgb in value.split()) + else: + section[key.strip()] = astype(value.strip()) + return result + + +def pilatus_description_metadata(description): + """Return metatata from Pilatus image description as dict. + + Return metadata from Pilatus pixel array detectors by Dectris, created + by camserver or TVX software. 
+ + >>> pilatus_description_metadata('# Pixel_size 172e-6 m x 172e-6 m') + {'Pixel_size': (0.000172, 0.000172)} + + """ + result = {} + if not description.startswith("# "): + return result + for c in "#:=,()": + description = description.replace(c, " ") + for line in description.split("\n"): + if line[:2] != " ": + continue + line = line.split() + name = line[0] + if line[0] not in TIFF.PILATUS_HEADER: + try: + result["DateTime"] = datetime.datetime.strptime( + " ".join(line), "%Y-%m-%dT%H %M %S.%f" + ) + except Exception: + result[name] = " ".join(line[1:]) + continue + indices, dtype = TIFF.PILATUS_HEADER[line[0]] + if isinstance(indices[0], slice): + # assumes one slice + values = line[indices[0]] + else: + values = [line[i] for i in indices] + if dtype is float and values[0] == "not": + values = ["NaN"] + values = tuple(dtype(v) for v in values) + if dtype == str: + values = " ".join(values) + elif len(values) == 1: + values = values[0] + result[name] = values + return result + + +def svs_description_metadata(description): + """Return metatata from Aperio image description as dict. + + The Aperio image description format is unspecified. Expect failures. + + >>> svs_description_metadata('Aperio Image Library v1.0') + {'Aperio Image Library': 'v1.0'} + + """ + if not description.startswith("Aperio Image Library "): + raise ValueError("invalid Aperio image description") + result = {} + lines = description.split("\n") + key, value = lines[0].strip().rsplit(None, 1) # 'Aperio Image Library' + result[key.strip()] = value.strip() + if len(lines) == 1: + return result + items = lines[1].split("|") + result[""] = items[0].strip() # TODO: parse this? + for item in items[1:]: + key, value = item.split(" = ") + result[key.strip()] = astype(value.strip()) + return result + + +def stk_description_metadata(description): + """Return metadata from MetaMorph image description as list of dict. + + The MetaMorph image description format is unspecified. Expect failures. 
+ + """ + description = description.strip() + if not description: + return [] + try: + description = bytes2str(description) + except UnicodeDecodeError: + warnings.warn("failed to parse MetaMorph image description") + return [] + result = [] + for plane in description.split("\x00"): + d = {} + for line in plane.split("\r\n"): + line = line.split(":", 1) + if len(line) > 1: + name, value = line + d[name.strip()] = astype(value.strip()) + else: + value = line[0].strip() + if value: + if "" in d: + d[""].append(value) + else: + d[""] = [value] + result.append(d) + return result + + +def metaseries_description_metadata(description): + """Return metatata from MetaSeries image description as dict.""" + if not description.startswith(""): + raise ValueError("invalid MetaSeries image description") + + from xml.etree import cElementTree as etree # delayed import + + root = etree.fromstring(description) + types = {"float": float, "int": int, "bool": lambda x: asbool(x, "on", "off")} + + def parse(root, result): + # recursive + for child in root: + attrib = child.attrib + if not attrib: + result[child.tag] = parse(child, {}) + continue + if "id" in attrib: + i = attrib["id"] + t = attrib["type"] + v = attrib["value"] + if t in types: + result[i] = types[t](v) + else: + result[i] = v + return result + + adict = parse(root, {}) + if "Description" in adict: + adict["Description"] = adict["Description"].replace(" ", "\n") + return adict + + +def scanimage_description_metadata(description): + """Return metatata from ScanImage image description as dict.""" + return matlabstr2py(description) + + +def scanimage_artist_metadata(artist): + """Return metatata from ScanImage artist tag as dict.""" + try: + return json.loads(artist) + except ValueError: + warnings.warn("invalid JSON '%s'" % artist) + + +def _replace_by(module_function, package=__package__, warn=None, prefix="_"): + """Try replace decorated function by module.function.""" + return lambda f: f # imageio: just use what's in 
here + + def _warn(e, warn): + if warn is None: + warn = "\n Functionality might be degraded or be slow.\n" + elif warn is True: + warn = "" + elif not warn: + return + warnings.warn("%s%s" % (e, warn)) + + try: + from importlib import import_module + except ImportError as e: + _warn(e, warn) + return identityfunc + + def decorate(func, module_function=module_function, warn=warn): + module, function = module_function.split(".") + try: + if package: + module = import_module("." + module, package=package) + else: + module = import_module(module) + except Exception as e: + _warn(e, warn) + return func + try: + func, oldfunc = getattr(module, function), func + except Exception as e: + _warn(e, warn) + return func + globals()[prefix + func.__name__] = oldfunc + return func + + return decorate + + +def decode_floats(data): + """Decode floating point horizontal differencing. + + The TIFF predictor type 3 reorders the bytes of the image values and + applies horizontal byte differencing to improve compression of floating + point images. The ordering of interleaved color channels is preserved. + + Parameters + ---------- + data : numpy.ndarray + The image to be decoded. The dtype must be a floating point. + The shape must include the number of contiguous samples per pixel + even if 1. 
+ + """ + shape = data.shape + dtype = data.dtype + if len(shape) < 3: + raise ValueError("invalid data shape") + if dtype.char not in "dfe": + raise ValueError("not a floating point image") + littleendian = data.dtype.byteorder == "<" or ( + sys.byteorder == "little" and data.dtype.byteorder == "=" + ) + # undo horizontal byte differencing + data = data.view("uint8") + data.shape = shape[:-2] + (-1,) + shape[-1:] + numpy.cumsum(data, axis=-2, dtype="uint8", out=data) + # reorder bytes + if littleendian: + data.shape = shape[:-2] + (-1,) + shape[-2:] + data = numpy.swapaxes(data, -3, -2) + data = numpy.swapaxes(data, -2, -1) + data = data[..., ::-1] + # back to float + data = numpy.ascontiguousarray(data) + data = data.view(dtype) + data.shape = shape + return data + + +@_replace_by("_tifffile.decode_packbits") +def decode_packbits(encoded): + """Decompress PackBits encoded byte string. + + PackBits is a simple byte-oriented run-length compression scheme. + + """ + func = ord if sys.version[0] == "2" else identityfunc + result = [] + result_extend = result.extend + i = 0 + try: + while True: + n = func(encoded[i]) + 1 + i += 1 + if n < 129: + result_extend(encoded[i : i + n]) + i += n + elif n > 129: + result_extend(encoded[i : i + 1] * (258 - n)) + i += 1 + except IndexError: + pass + return b"".join(result) if sys.version[0] == "2" else bytes(result) + + +@_replace_by("_tifffile.decode_lzw") +def decode_lzw(encoded): + """Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string). + + The strip must begin with a CLEAR code and end with an EOI code. + + This implementation of the LZW decoding algorithm is described in (1) and + is not compatible with old style LZW compressed files like quad-lzw.tif. 
+ + """ + len_encoded = len(encoded) + bitcount_max = len_encoded * 8 + unpack = struct.unpack + + if sys.version[0] == "2": + newtable = [chr(i) for i in range(256)] + else: + newtable = [bytes([i]) for i in range(256)] + newtable.extend((0, 0)) + + def next_code(): + """Return integer of 'bitw' bits at 'bitcount' position in encoded.""" + start = bitcount // 8 + s = encoded[start : start + 4] + try: + code = unpack(">I", s)[0] + except Exception: + code = unpack(">I", s + b"\x00" * (4 - len(s)))[0] + code <<= bitcount % 8 + code &= mask + return code >> shr + + switchbitch = { # code: bit-width, shr-bits, bit-mask + 255: (9, 23, int(9 * "1" + "0" * 23, 2)), + 511: (10, 22, int(10 * "1" + "0" * 22, 2)), + 1023: (11, 21, int(11 * "1" + "0" * 21, 2)), + 2047: (12, 20, int(12 * "1" + "0" * 20, 2)), + } + bitw, shr, mask = switchbitch[255] + bitcount = 0 + + if len_encoded < 4: + raise ValueError("strip must be at least 4 characters long") + + if next_code() != 256: + raise ValueError("strip must begin with CLEAR code") + + code = 0 + oldcode = 0 + result = [] + result_append = result.append + while True: + code = next_code() # ~5% faster when inlining this function + bitcount += bitw + if code == 257 or bitcount >= bitcount_max: # EOI + break + if code == 256: # CLEAR + table = newtable[:] + table_append = table.append + lentable = 258 + bitw, shr, mask = switchbitch[255] + code = next_code() + bitcount += bitw + if code == 257: # EOI + break + result_append(table[code]) + else: + if code < lentable: + decoded = table[code] + newcode = table[oldcode] + decoded[:1] + else: + newcode = table[oldcode] + newcode += newcode[:1] + decoded = newcode + result_append(decoded) + table_append(newcode) + lentable += 1 + oldcode = code + if lentable in switchbitch: + bitw, shr, mask = switchbitch[lentable] + + if code != 257: + warnings.warn("unexpected end of LZW stream (code %i)" % code) + + return b"".join(result) + + +@_replace_by("_tifffile.unpack_ints") +def 
unpack_ints(data, dtype, itemsize, runlen=0): + """Decompress byte string to array of integers of any bit size <= 32. + + This Python implementation is slow and only handles itemsizes 1, 2, 4, 8, + 16, 32, and 64. + + Parameters + ---------- + data : byte str + Data to decompress. + dtype : numpy.dtype or str + A numpy boolean or integer type. + itemsize : int + Number of bits per integer. + runlen : int + Number of consecutive integers, after which to start at next byte. + + Examples + -------- + >>> unpack_ints(b'a', 'B', 1) + array([0, 1, 1, 0, 0, 0, 0, 1], dtype=uint8) + >>> unpack_ints(b'ab', 'B', 2) + array([1, 2, 0, 1, 1, 2, 0, 2], dtype=uint8) + + """ + if itemsize == 1: # bitarray + data = numpy.frombuffer(data, "|B") + data = numpy.unpackbits(data) + if runlen % 8: + data = data.reshape(-1, runlen + (8 - runlen % 8)) + data = data[:, :runlen].reshape(-1) + return data.astype(dtype) + + dtype = numpy.dtype(dtype) + if itemsize in (8, 16, 32, 64): + return numpy.frombuffer(data, dtype) + if itemsize not in (1, 2, 4, 8, 16, 32): + raise ValueError("itemsize not supported: %i" % itemsize) + if dtype.kind not in "biu": + raise ValueError("invalid dtype") + + itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize) + if itembytes != dtype.itemsize: + raise ValueError("dtype.itemsize too small") + if runlen == 0: + runlen = (8 * len(data)) // itemsize + skipbits = runlen * itemsize % 8 + if skipbits: + skipbits = 8 - skipbits + shrbits = itembytes * 8 - itemsize + bitmask = int(itemsize * "1" + "0" * shrbits, 2) + dtypestr = ">" + dtype.char # dtype always big-endian? 
+ + unpack = struct.unpack + size = runlen * (len(data) * 8 // (runlen * itemsize + skipbits)) + result = numpy.empty((size,), dtype) + bitcount = 0 + for i in range(size): + start = bitcount // 8 + s = data[start : start + itembytes] + try: + code = unpack(dtypestr, s)[0] + except Exception: + code = unpack(dtypestr, s + b"\x00" * (itembytes - len(s)))[0] + code <<= bitcount % 8 + code &= bitmask + result[i] = code >> shrbits + bitcount += itemsize + if (i + 1) % runlen == 0: + bitcount += skipbits + return result + + +def unpack_rgb(data, dtype=">> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff) + >>> print(unpack_rgb(data, '>> print(unpack_rgb(data, '>> print(unpack_rgb(data, '= bits) + data = numpy.frombuffer(data, dtype.byteorder + dt) + result = numpy.empty((data.size, len(bitspersample)), dtype.char) + for i, bps in enumerate(bitspersample): + t = data >> int(numpy.sum(bitspersample[i + 1 :])) + t &= int("0b" + "1" * bps, 2) + if rescale: + o = ((dtype.itemsize * 8) // bps + 1) * bps + if o > data.dtype.itemsize * 8: + t = t.astype("I") + t *= (2**o - 1) // (2**bps - 1) + t //= 2 ** (o - (dtype.itemsize * 8)) + result[:, i] = t + return result.reshape(-1) + + +@_replace_by("_tifffile.reverse_bitorder") +def reverse_bitorder(data): + """Reverse bits in each byte of byte string or numpy array. + + Decode data where pixels with lower column values are stored in the + lower-order bits of the bytes (FillOrder is LSB2MSB). + + Parameters + ---------- + data : byte string or ndarray + The data to be bit reversed. If byte string, a new bit-reversed byte + string is returned. Numpy arrays are bit-reversed in-place. 
+ + Examples + -------- + >>> reverse_bitorder(b'\\x01\\x64') + b'\\x80&' + >>> data = numpy.array([1, 666], dtype='uint16') + >>> reverse_bitorder(data) + >>> data + array([ 128, 16473], dtype=uint16) + + """ + try: + view = data.view("uint8") + numpy.take(TIFF.REVERSE_BITORDER_ARRAY, view, out=view) + except AttributeError: + return data.translate(TIFF.REVERSE_BITORDER_BYTES) + except ValueError: + raise NotImplementedError("slices of arrays not supported") + + +def apply_colormap(image, colormap, contig=True): + """Return palette-colored image. + + The image values are used to index the colormap on axis 1. The returned + image is of shape image.shape+colormap.shape[0] and dtype colormap.dtype. + + Parameters + ---------- + image : numpy.ndarray + Indexes into the colormap. + colormap : numpy.ndarray + RGB lookup table aka palette of shape (3, 2**bits_per_sample). + contig : bool + If True, return a contiguous array. + + Examples + -------- + >>> image = numpy.arange(256, dtype='uint8') + >>> colormap = numpy.vstack([image, image, image]).astype('uint16') * 256 + >>> apply_colormap(image, colormap)[-1] + array([65280, 65280, 65280], dtype=uint16) + + """ + image = numpy.take(colormap, image, axis=1) + image = numpy.rollaxis(image, 0, image.ndim) + if contig: + image = numpy.ascontiguousarray(image) + return image + + +def reorient(image, orientation): + """Return reoriented view of image array. + + Parameters + ---------- + image : numpy.ndarray + Non-squeezed output of asarray() functions. + Axes -3 and -2 must be image length and width respectively. + orientation : int or str + One of TIFF.ORIENTATION names or values. 
+ + """ + ORIENTATION = TIFF.ORIENTATION + orientation = enumarg(ORIENTATION, orientation) + + if orientation == ORIENTATION.TOPLEFT: + return image + elif orientation == ORIENTATION.TOPRIGHT: + return image[..., ::-1, :] + elif orientation == ORIENTATION.BOTLEFT: + return image[..., ::-1, :, :] + elif orientation == ORIENTATION.BOTRIGHT: + return image[..., ::-1, ::-1, :] + elif orientation == ORIENTATION.LEFTTOP: + return numpy.swapaxes(image, -3, -2) + elif orientation == ORIENTATION.RIGHTTOP: + return numpy.swapaxes(image, -3, -2)[..., ::-1, :] + elif orientation == ORIENTATION.RIGHTBOT: + return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :] + elif orientation == ORIENTATION.LEFTBOT: + return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :] + + +def repeat_nd(a, repeats): + """Return read-only view into input array with elements repeated. + + Zoom nD image by integer factors using nearest neighbor interpolation + (box filter). + + Parameters + ---------- + a : array_like + Input array. + repeats : sequence of int + The number of repetitions to apply along each dimension of input array. + + Example + ------- + >>> repeat_nd([[1, 2], [3, 4]], (2, 2)) + array([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 3, 4, 4], + [3, 3, 4, 4]]) + + """ + a = numpy.asarray(a) + reshape = [] + shape = [] + strides = [] + for i, j, k in zip(a.strides, a.shape, repeats): + shape.extend((j, k)) + strides.extend((i, 0)) + reshape.append(j * k) + return numpy.lib.stride_tricks.as_strided( + a, shape, strides, writeable=False + ).reshape(reshape) + + +def reshape_nd(data_or_shape, ndim): + """Return image array or shape with at least ndim dimensions. + + Prepend 1s to image shape as necessary. 
+ + >>> reshape_nd(numpy.empty(0), 1).shape + (0,) + >>> reshape_nd(numpy.empty(1), 2).shape + (1, 1) + >>> reshape_nd(numpy.empty((2, 3)), 3).shape + (1, 2, 3) + >>> reshape_nd(numpy.empty((3, 4, 5)), 3).shape + (3, 4, 5) + >>> reshape_nd((2, 3), 3) + (1, 2, 3) + + """ + is_shape = isinstance(data_or_shape, tuple) + shape = data_or_shape if is_shape else data_or_shape.shape + if len(shape) >= ndim: + return data_or_shape + shape = (1,) * (ndim - len(shape)) + shape + return shape if is_shape else data_or_shape.reshape(shape) + + +def squeeze_axes(shape, axes, skip="XY"): + """Return shape and axes with single-dimensional entries removed. + + Remove unused dimensions unless their axes are listed in 'skip'. + + >>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC') + ((5, 2, 1), 'TYX') + + """ + if len(shape) != len(axes): + raise ValueError("dimensions of axes and shape do not match") + shape, axes = zip(*(i for i in zip(shape, axes) if i[0] > 1 or i[1] in skip)) + return tuple(shape), "".join(axes) + + +def transpose_axes(image, axes, asaxes="CTZYX"): + """Return image with its axes permuted to match specified axes. + + A view is returned if possible. + + >>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape + (5, 2, 1, 3, 4) + + """ + for ax in axes: + if ax not in asaxes: + raise ValueError("unknown axis %s" % ax) + # add missing axes to image + shape = image.shape + for ax in reversed(asaxes): + if ax not in axes: + axes = ax + axes + shape = (1,) + shape + image = image.reshape(shape) + # transpose axes + image = image.transpose([axes.index(ax) for ax in asaxes]) + return image + + +def reshape_axes(axes, shape, newshape, unknown="Q"): + """Return axes matching new shape. + + Unknown dimensions are labelled 'Q'. 
+ + >>> reshape_axes('YXS', (219, 301, 1), (219, 301)) + 'YX' + >>> reshape_axes('IYX', (12, 219, 301), (3, 4, 219, 1, 301, 1)) + 'QQYQXQ' + + """ + shape = tuple(shape) + newshape = tuple(newshape) + if len(axes) != len(shape): + raise ValueError("axes do not match shape") + + size = product(shape) + newsize = product(newshape) + if size != newsize: + raise ValueError("cannot reshape %s to %s" % (shape, newshape)) + if not axes or not newshape: + return "" + + lendiff = max(0, len(shape) - len(newshape)) + if lendiff: + newshape = newshape + (1,) * lendiff + + i = len(shape) - 1 + prodns = 1 + prods = 1 + result = [] + for ns in newshape[::-1]: + prodns *= ns + while i > 0 and shape[i] == 1 and ns != 1: + i -= 1 + if ns == shape[i] and prodns == prods * shape[i]: + prods *= shape[i] + result.append(axes[i]) + i -= 1 + else: + result.append(unknown) + + return "".join(reversed(result[lendiff:])) + + +def stack_pages(pages, out=None, maxworkers=1, *args, **kwargs): + """Read data from sequence of TiffPage and stack them vertically. + + Additional parameters are passed to the TiffPage.asarray function. 
+ + """ + npages = len(pages) + if npages == 0: + raise ValueError("no pages") + + if npages == 1: + return pages[0].asarray(out=out, *args, **kwargs) + + page0 = next(p for p in pages if p is not None) + page0.asarray(validate=None) # ThreadPoolExecutor swallows exceptions + shape = (npages,) + page0.keyframe.shape + dtype = page0.keyframe.dtype + out = create_output(out, shape, dtype) + + if maxworkers is None: + maxworkers = multiprocessing.cpu_count() // 2 + page0.parent.filehandle.lock = maxworkers > 1 + + filecache = OpenFileCache( + size=max(4, maxworkers), lock=page0.parent.filehandle.lock + ) + + def func(page, index, out=out, filecache=filecache, args=args, kwargs=kwargs): + """Read, decode, and copy page data.""" + if page is not None: + filecache.open(page.parent.filehandle) + out[index] = page.asarray( + lock=filecache.lock, reopen=False, validate=False, *args, **kwargs + ) + filecache.close(page.parent.filehandle) + + if maxworkers < 2: + for i, page in enumerate(pages): + func(page, i) + else: + with concurrent.futures.ThreadPoolExecutor(maxworkers) as executor: + executor.map(func, pages, range(npages)) + + filecache.clear() + page0.parent.filehandle.lock = None + + return out + + +def clean_offsets_counts(offsets, counts): + """Return cleaned offsets and byte counts. + + Remove zero offsets and counts. Use to sanitize _offsets and _bytecounts + tag values for strips or tiles. 
+ + """ + offsets = list(offsets) + counts = list(counts) + assert len(offsets) == len(counts) + j = 0 + for i, (o, b) in enumerate(zip(offsets, counts)): + if o > 0 and b > 0: + if i > j: + offsets[j] = o + counts[j] = b + j += 1 + elif b > 0 and o <= 0: + raise ValueError("invalid offset") + else: + warnings.warn("empty byte count") + if j == 0: + j = 1 + return offsets[:j], counts[:j] + + +def buffered_read(fh, lock, offsets, bytecounts, buffersize=2**26): + """Return iterator over blocks read from file.""" + length = len(offsets) + i = 0 + while i < length: + data = [] + with lock: + size = 0 + while size < buffersize and i < length: + fh.seek(offsets[i]) + bytecount = bytecounts[i] + data.append(fh.read(bytecount)) + size += bytecount + i += 1 + for block in data: + yield block + + +def create_output(out, shape, dtype, mode="w+", suffix=".memmap"): + """Return numpy array where image data of shape and dtype can be copied. + + The 'out' parameter may have the following values or types: + + None + An empty array of shape and dtype is created and returned. + numpy.ndarray + An existing writable array of compatible dtype and shape. A view of + the same array is returned after verification. + 'memmap' or 'memmap:tempdir' + A memory-map to an array stored in a temporary binary file on disk + is created and returned. + str or open file + The file name or file object used to create a memory-map to an array + stored in a binary file on disk. The created memory-mapped array is + returned. 
+ + """ + if out is None: + return numpy.zeros(shape, dtype) + if isinstance(out, str) and out[:6] == "memmap": + tempdir = out[7:] if len(out) > 7 else None + with tempfile.NamedTemporaryFile(dir=tempdir, suffix=suffix) as fh: + return numpy.memmap(fh, shape=shape, dtype=dtype, mode=mode) + if isinstance(out, numpy.ndarray): + if product(shape) != product(out.shape): + raise ValueError("incompatible output shape") + if not numpy.can_cast(dtype, out.dtype): + raise ValueError("incompatible output dtype") + return out.reshape(shape) + if isinstance(out, pathlib.Path): + out = str(out) + return numpy.memmap(out, shape=shape, dtype=dtype, mode=mode) + + +def matlabstr2py(string): + """Return Python object from Matlab string representation. + + Return str, bool, int, float, list (Matlab arrays or cells), or + dict (Matlab structures) types. + + Use to access ScanImage metadata. + + >>> matlabstr2py('1') + 1 + >>> matlabstr2py("['x y z' true false; 1 2.0 -3e4; NaN Inf @class]") + [['x y z', True, False], [1, 2.0, -30000.0], [nan, inf, '@class']] + >>> d = matlabstr2py("SI.hChannels.channelType = {'stripe' 'stripe'}\\n" + ... 
"SI.hChannels.channelsActive = 2") + >>> d['SI.hChannels.channelType'] + ['stripe', 'stripe'] + + """ + # TODO: handle invalid input + # TODO: review unboxing of multidimensional arrays + + def lex(s): + # return sequence of tokens from matlab string representation + tokens = ["["] + while True: + t, i = next_token(s) + if t is None: + break + if t == ";": + tokens.extend(("]", "[")) + elif t == "[": + tokens.extend(("[", "[")) + elif t == "]": + tokens.extend(("]", "]")) + else: + tokens.append(t) + s = s[i:] + tokens.append("]") + return tokens + + def next_token(s): + # return next token in matlab string + length = len(s) + if length == 0: + return None, 0 + i = 0 + while i < length and s[i] == " ": + i += 1 + if i == length: + return None, i + if s[i] in "{[;]}": + return s[i], i + 1 + if s[i] == "'": + j = i + 1 + while j < length and s[j] != "'": + j += 1 + return s[i : j + 1], j + 1 + if s[i] == "<": + j = i + 1 + while j < length and s[j] != ">": + j += 1 + return s[i : j + 1], j + 1 + j = i + while j < length and s[j] not in " {[;]}": + j += 1 + return s[i:j], j + + def value(s, fail=False): + # return Python value of token + s = s.strip() + if not s: + return s + if len(s) == 1: + try: + return int(s) + except Exception: + if fail: + raise ValueError() + return s + if s[0] == "'": + if fail and s[-1] != "'" or "'" in s[1:-1]: + raise ValueError() + return s[1:-1] + if s[0] == "<": + if fail and s[-1] != ">" or "<" in s[1:-1]: + raise ValueError() + return s + if fail and any(i in s for i in " ';[]{}"): + raise ValueError() + if s[0] == "@": + return s + if s in ("true", "True"): + return True + if s in ("false", "False"): + return False + if s[:6] == "zeros(": + return numpy.zeros([int(i) for i in s[6:-1].split(",")]).tolist() + if s[:5] == "ones(": + return numpy.ones([int(i) for i in s[5:-1].split(",")]).tolist() + if "." 
in s or "e" in s: + try: + return float(s) + except Exception: + pass + try: + return int(s) + except Exception: + pass + try: + return float(s) # nan, inf + except Exception: + if fail: + raise ValueError() + return s + + def parse(s): + # return Python value from string representation of Matlab value + s = s.strip() + try: + return value(s, fail=True) + except ValueError: + pass + result = add2 = [] + levels = [add2] + for t in lex(s): + if t in "[{": + add2 = [] + levels.append(add2) + elif t in "]}": + x = levels.pop() + if len(x) == 1 and isinstance(x[0], (list, str)): + x = x[0] + add2 = levels[-1] + add2.append(x) + else: + add2.append(value(t)) + if len(result) == 1 and isinstance(result[0], (list, str)): + result = result[0] + return result + + if "\r" in string or "\n" in string: + # structure + d = {} + for line in string.splitlines(): + line = line.strip() + if not line or line[0] == "%": + continue + k, v = line.split("=", 1) + k = k.strip() + if any(c in k for c in " ';[]{}<>"): + continue + d[k] = parse(v) + return d + return parse(string) + + +def stripnull(string, null=b"\x00"): + """Return string truncated at first null character. + + Clean NULL terminated C strings. For unicode strings use null='\\0'. + + >>> stripnull(b'string\\x00') + b'string' + >>> stripnull('string\\x00', null='\\0') + 'string' + + """ + i = string.find(null) + return string if (i < 0) else string[:i] + + +def stripascii(string): + """Return string truncated at last byte that is 7-bit ASCII. + + Clean NULL separated and terminated TIFF strings. + + >>> stripascii(b'string\\x00string\\n\\x01\\x00') + b'string\\x00string\\n' + >>> stripascii(b'\\x00') + b'' + + """ + # TODO: pythonize this + i = len(string) + while i: + i -= 1 + if 8 < byte2int(string[i]) < 127: + break + else: + i = -1 + return string[: i + 1] + + +def asbool(value, true=(b"true", "true"), false=(b"false", "false")): + """Return string as bool if possible, else raise TypeError. 
+ + >>> asbool(b' False ') + False + + """ + value = value.strip().lower() + if value in true: # might raise UnicodeWarning/BytesWarning + return True + if value in false: + return False + raise TypeError() + + +def astype(value, types=None): + """Return argument as one of types if possible. + + >>> astype('42') + 42 + >>> astype('3.14') + 3.14 + >>> astype('True') + True + >>> astype(b'Neee-Wom') + 'Neee-Wom' + + """ + if types is None: + types = int, float, asbool, bytes2str + for typ in types: + try: + return typ(value) + except (ValueError, AttributeError, TypeError, UnicodeEncodeError): + pass + return value + + +def format_size(size, threshold=1536): + """Return file size as string from byte size. + + >>> format_size(1234) + '1234 B' + >>> format_size(12345678901) + '11.50 GiB' + + """ + if size < threshold: + return "%i B" % size + for unit in ("KiB", "MiB", "GiB", "TiB", "PiB"): + size /= 1024.0 + if size < threshold: + return "%.2f %s" % (size, unit) + + +def identityfunc(arg): + """Single argument identity function. + + >>> identityfunc('arg') + 'arg' + + """ + return arg + + +def nullfunc(*args, **kwargs): + """Null function. + + >>> nullfunc('arg', kwarg='kwarg') + + """ + return + + +def sequence(value): + """Return tuple containing value if value is not a sequence. + + >>> sequence(1) + (1,) + >>> sequence([1]) + [1] + + """ + try: + len(value) + return value + except TypeError: + return (value,) + + +def product(iterable): + """Return product of sequence of numbers. + + Equivalent of functools.reduce(operator.mul, iterable, 1). + Multiplying numpy integers might overflow. + + >>> product([2**8, 2**30]) + 274877906944 + >>> product([]) + 1 + + """ + prod = 1 + for i in iterable: + prod *= i + return prod + + +def natural_sorted(iterable): + """Return human sorted list of strings. + + E.g. for sorting file names. 
def natural_sorted(iterable):
    """Return list of strings sorted in human order.

    E.g. for sorting file names.

    >>> natural_sorted(['f1', 'f2', 'f10'])
    ['f1', 'f2', 'f10']

    """
    digits = re.compile(r"(\d+)")

    def sortkey(item):
        # split into digit and non-digit runs; digit runs compare numerically
        return [int(run) if run.isdigit() else run for run in re.split(digits, item)]

    return sorted(iterable, key=sortkey)


def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)):
    """Return datetime object from timestamp in Excel serial format.

    Convert LSM time stamps.

    >>> excel_datetime(40237.029999999795)
    datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)

    """
    return epoch + datetime.timedelta(timestamp)


def julian_datetime(julianday, milisecond=0):
    """Return datetime from days since 1/1/4713 BC and ms since midnight.

    Convert Julian dates according to MetaMorph.

    >>> julian_datetime(2451576, 54362783)
    datetime.datetime(2000, 2, 2, 15, 6, 2, 783)

    """
    if julianday <= 1721423:
        # Python datetime cannot represent dates before year 1
        return None

    dayno = julianday + 1
    if dayno > 2299160:
        # Gregorian calendar correction
        century = math.trunc((dayno - 1867216.25) / 36524.25)
        dayno += 1 + century - century // 4
    base = dayno + (1524 if dayno > 1721423 else 1158)
    yearest = math.trunc((base - 122.1) / 365.25)
    dayofyear = math.trunc(365.25 * yearest)
    monthest = math.trunc((base - dayofyear) / 30.6001)

    day = base - dayofyear - math.trunc(30.6001 * monthest)
    month = monthest - (1 if monthest < 13.5 else 13)
    year = yearest - (4716 if month > 2.5 else 4715)

    hour, milisecond = divmod(milisecond, 1000 * 60 * 60)
    minute, milisecond = divmod(milisecond, 1000 * 60)
    second, milisecond = divmod(milisecond, 1000)

    return datetime.datetime(year, month, day, hour, minute, second, milisecond)
+ + >>> byteorder_isnative('=') + True + + """ + if byteorder == "=" or byteorder == sys.byteorder: + return True + keys = {"big": ">", "little": "<"} + return keys.get(byteorder, byteorder) == keys[sys.byteorder] + + +def recarray2dict(recarray): + """Return numpy.recarray as dict.""" + # TODO: subarrays + result = {} + for descr, value in zip(recarray.dtype.descr, recarray): + name, dtype = descr[:2] + if dtype[1] == "S": + value = bytes2str(stripnull(value)) + elif value.ndim < 2: + value = value.tolist() + result[name] = value + return result + + +def xml2dict(xml, sanitize=True, prefix=None): + """Return XML as dict. + + >>> xml2dict('1') + {'root': {'key': 1, 'attr': 'name'}} + + """ + from xml.etree import cElementTree as etree # delayed import + + at = tx = "" + if prefix: + at, tx = prefix + + def astype(value): + # return value as int, float, bool, or str + for t in (int, float, asbool): + try: + return t(value) + except Exception: + pass + return value + + def etree2dict(t): + # adapted from https://stackoverflow.com/a/10077069/453463 + key = t.tag + if sanitize: + key = key.rsplit("}", 1)[-1] + d = {key: {} if t.attrib else None} + children = list(t) + if children: + dd = collections.defaultdict(list) + for dc in map(etree2dict, children): + for k, v in dc.items(): + dd[k].append(astype(v)) + d = { + key: { + k: astype(v[0]) if len(v) == 1 else astype(v) for k, v in dd.items() + } + } + if t.attrib: + d[key].update((at + k, astype(v)) for k, v in t.attrib.items()) + if t.text: + text = t.text.strip() + if children or t.attrib: + if text: + d[key][tx + "value"] = astype(text) + else: + d[key] = astype(text) + return d + + return etree2dict(etree.fromstring(xml)) + + +def hexdump(bytestr, width=75, height=24, snipat=-2, modulo=2, ellipsis="..."): + """Return hexdump representation of byte string. + + >>> hexdump(binascii.unhexlify('49492a00080000000e00fe0004000100')) + '49 49 2a 00 08 00 00 00 0e 00 fe 00 04 00 01 00 II*.............' 
+ + """ + size = len(bytestr) + if size < 1 or width < 2 or height < 1: + return "" + if height == 1: + addr = b"" + bytesperline = min(modulo * (((width - len(addr)) // 4) // modulo), size) + if bytesperline < 1: + return "" + nlines = 1 + else: + addr = b"%%0%ix: " % len(b"%x" % size) + bytesperline = min(modulo * (((width - len(addr % 1)) // 4) // modulo), size) + if bytesperline < 1: + return "" + width = 3 * bytesperline + len(addr % 1) + nlines = (size - 1) // bytesperline + 1 + + if snipat is None or snipat == 1: + snipat = height + elif 0 < abs(snipat) < 1: + snipat = int(math.floor(height * snipat)) + if snipat < 0: + snipat += height + + if height == 1 or nlines == 1: + blocks = [(0, bytestr[:bytesperline])] + addr = b"" + height = 1 + width = 3 * bytesperline + elif height is None or nlines <= height: + blocks = [(0, bytestr)] + elif snipat <= 0: + start = bytesperline * (nlines - height) + blocks = [(start, bytestr[start:])] # (start, None) + elif snipat >= height or height < 3: + end = bytesperline * height + blocks = [(0, bytestr[:end])] # (end, None) + else: + end1 = bytesperline * snipat + end2 = bytesperline * (height - snipat - 1) + blocks = [ + (0, bytestr[:end1]), + (size - end1 - end2, None), + (size - end2, bytestr[size - end2 :]), + ] + + ellipsis = str2bytes(ellipsis) + result = [] + for start, bytestr in blocks: + if bytestr is None: + result.append(ellipsis) # 'skip %i bytes' % start) + continue + hexstr = binascii.hexlify(bytestr) + strstr = re.sub(rb"[^\x20-\x7f]", b".", bytestr) + for i in range(0, len(bytestr), bytesperline): + h = hexstr[2 * i : 2 * i + bytesperline * 2] + r = (addr % (i + start)) if height > 1 else addr + r += b" ".join(h[i : i + 2] for i in range(0, 2 * bytesperline, 2)) + r += b" " * (width - len(r)) + r += strstr[i : i + bytesperline] + result.append(r) + result = b"\n".join(result) + if sys.version_info[0] == 3: + result = result.decode("ascii") + return result + + +def isprintable(string): + """Return if all 
characters in string are printable. + + >>> isprintable('abc') + True + >>> isprintable(b'\01') + False + + """ + string = string.strip() + if len(string) < 1: + return True + if sys.version_info[0] == 3: + try: + return string.isprintable() + except Exception: + pass + try: + return string.decode("utf-8").isprintable() + except Exception: + pass + else: + if string.isalnum(): + return True + printable = ( + "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST" + "UVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r\x0b\x0c" + ) + return all(c in printable for c in string) + + +def clean_whitespace(string, compact=False): + """Return string with compressed whitespace.""" + for a, b in ( + ("\r\n", "\n"), + ("\r", "\n"), + ("\n\n", "\n"), + ("\t", " "), + (" ", " "), + ): + string = string.replace(a, b) + if compact: + for a, b in (("\n", " "), ("[ ", "["), (" ", " "), (" ", " "), (" ", " ")): + string = string.replace(a, b) + return string.strip() + + +def pformat_xml(xml): + """Return pretty formatted XML.""" + try: + import lxml.etree as etree # delayed import + + if not isinstance(xml, bytes): + xml = xml.encode("utf-8") + xml = etree.parse(io.BytesIO(xml)) + xml = etree.tostring( + xml, pretty_print=True, xml_declaration=True, encoding=xml.docinfo.encoding + ) + xml = bytes2str(xml) + except Exception: + if isinstance(xml, bytes): + xml = bytes2str(xml) + xml = xml.replace("><", ">\n<") + return xml.replace(" ", " ").replace("\t", " ") + + +def pformat(arg, width=79, height=24, compact=True): + """Return pretty formatted representation of object as string. + + Whitespace might be altered. 
+ + """ + if height is None or height < 1: + height = 1024 + if width is None or width < 1: + width = 256 + + npopt = numpy.get_printoptions() + numpy.set_printoptions(threshold=100, linewidth=width) + + if isinstance(arg, basestring): + if arg[:5].lower() in (" height: + arg = "\n".join(argl[: height // 2] + ["..."] + argl[-height // 2 :]) + return arg + + +def snipstr(string, width=79, snipat=0.5, ellipsis="..."): + """Return string cut to specified length. + + >>> snipstr('abcdefghijklmnop', 8) + 'abc...op' + + """ + if ellipsis is None: + if isinstance(string, bytes): + ellipsis = b"..." + else: + ellipsis = "\u2026" # does not print on win-py3.5 + esize = len(ellipsis) + + splitlines = string.splitlines() + # TODO: finish and test multiline snip + + result = [] + for line in splitlines: + if line is None: + result.append(ellipsis) + continue + linelen = len(line) + if linelen <= width: + result.append(string) + continue + + split = snipat + if split is None or split == 1: + split = linelen + elif 0 < abs(split) < 1: + split = int(math.floor(linelen * split)) + if split < 0: + split += linelen + if split < 0: + split = 0 + + if esize == 0 or width < esize + 1: + if split <= 0: + result.append(string[-width:]) + else: + result.append(string[:width]) + elif split <= 0: + result.append(ellipsis + string[esize - width :]) + elif split >= linelen or width < esize + 4: + result.append(string[: width - esize] + ellipsis) + else: + splitlen = linelen - width + esize + end1 = split - splitlen // 2 + end2 = end1 + splitlen + result.append(string[:end1] + ellipsis + string[end2:]) + + if isinstance(string, bytes): + return b"\n".join(result) + else: + return "\n".join(result) + + +def enumarg(enum, arg): + """Return enum member from its name or value. 
def enumarg(enum, arg):
    """Return enum member from its name or value.

    >>> enumarg(TIFF.PHOTOMETRIC, 2)
    <PHOTOMETRIC.RGB: 2>
    >>> enumarg(TIFF.PHOTOMETRIC, 'RGB')
    <PHOTOMETRIC.RGB: 2>

    """
    try:
        return enum(arg)
    except Exception:
        pass
    try:
        return enum[arg.upper()]
    except Exception:
        raise ValueError("invalid argument %s" % arg)


def parse_kwargs(kwargs, *keys, **keyvalues):
    """Return dict with keys from keys|keyvals and values from kwargs|keyvals.

    Existing keys are deleted from kwargs.

    >>> kwargs = {'one': 1, 'two': 2, 'four': 4}
    >>> kwargs2 = parse_kwargs(kwargs, 'two', 'three', four=None, five=5)
    >>> kwargs == {'one': 1}
    True
    >>> kwargs2 == {'two': 2, 'four': 4, 'five': 5}
    True

    """
    result = {}
    for key in keys:
        if key in kwargs:
            result[key] = kwargs.pop(key)
    for key, default in keyvalues.items():
        result[key] = kwargs.pop(key) if key in kwargs else default
    return result


def update_kwargs(kwargs, **keyvalues):
    """Update dict with keys and values if keys do not already exist.

    >>> kwargs = {'one': 1, }
    >>> update_kwargs(kwargs, one=None, two=2)
    >>> kwargs == {'one': 1, 'two': 2}
    True

    """
    for key, value in keyvalues.items():
        kwargs.setdefault(key, value)
def validate_jhove(filename, jhove="jhove", ignore=("More than 50 IFDs",)):
    """Validate TIFF file using jhove -m TIFF-hul.

    Raise ValueError if jhove outputs an error message unless the message
    contains one of the strings in 'ignore'.

    JHOVE does not support bigtiff or more than 50 IFDs.

    """
    import subprocess  # noqa: delayed import

    # NOTE(review): 'jhove' and 'filename' become a subprocess command
    # line (shell=False); still, do not pass untrusted values here
    out = subprocess.check_output([jhove, filename, "-m", "TIFF-hul"])
    if b"ErrorMessage: " in out:
        for line in out.splitlines():
            line = line.strip()
            if line.startswith(b"ErrorMessage: "):
                error = line[14:].decode("utf8")
                for i in ignore:
                    if i in error:
                        break
                else:
                    # message not covered by the ignore list
                    raise ValueError(error)
                break  # only the first error message is examined


def lsm2bin(lsmfile, binfile=None, tile=(256, 256), verbose=True):
    """Convert [MP]TZCYX LSM file to series of BIN files.

    One BIN file containing 'ZCYX' data are created for each position, time,
    and tile. The position, time, and tile indices are encoded at the end
    of the filenames.

    """
    verbose = print_ if verbose else nullfunc

    if binfile is None:
        binfile = lsmfile
    elif binfile.lower() == "none":
        binfile = None
    if binfile:
        # pattern encodes z/c/y/x sizes now, m/p/t/y/x indices per file later
        binfile += "_(z%ic%iy%ix%i)_m%%ip%%it%%03iy%%ix%%i.bin"

    verbose("\nOpening LSM file... ", end="", flush=True)
    start_time = time.time()

    with TiffFile(lsmfile) as lsm:
        if not lsm.is_lsm:
            verbose("\n", lsm, flush=True)
            raise ValueError("not a LSM file")
        series = lsm.series[0]  # first series contains the image data
        shape = series.shape
        axes = series.axes
        dtype = series.dtype
        size = product(shape) * dtype.itemsize

        verbose("%.3f s" % (time.time() - start_time))
        # verbose(lsm, flush=True)
        verbose(
            "Image\n axes: %s\n shape: %s\n dtype: %s\n size: %s"
            % (axes, shape, dtype, format_size(size)),
            flush=True,
        )
        if not series.axes.endswith("TZCYX"):
            raise ValueError("not a *TZCYX LSM file")

        verbose("Copying image from LSM to BIN files", end="", flush=True)
        start_time = time.time()
        tiles = shape[-2] // tile[-2], shape[-1] // tile[-1]
        if binfile:
            binfile = binfile % (shape[-4], shape[-3], tile[0], tile[1])
        # pad shape to 7 dimensions: mosaic, position, time, z, c, y, x
        shape = (1,) * (7 - len(shape)) + shape
        # cache for ZCYX stacks and output files
        data = numpy.empty(shape[3:], dtype=dtype)
        out = numpy.empty((shape[-4], shape[-3], tile[0], tile[1]), dtype=dtype)
        # iterate over Tiff pages containing data
        pages = iter(series.pages)
        for m in range(shape[0]):  # mosaic axis
            for p in range(shape[1]):  # position axis
                for t in range(shape[2]):  # time axis
                    for z in range(shape[3]):  # z slices
                        data[z] = next(pages).asarray()
                    for y in range(tiles[0]):  # tile y
                        for x in range(tiles[1]):  # tile x
                            out[:] = data[
                                ...,
                                y * tile[0] : (y + 1) * tile[0],
                                x * tile[1] : (x + 1) * tile[1],
                            ]
                            if binfile:
                                out.tofile(binfile % (m, p, t, y, x))
                            verbose(".", end="", flush=True)
        verbose(" %.3f s" % (time.time() - start_time))
+ + """ + isrgb = photometric in ("RGB",) # 'PALETTE', 'YCBCR' + if data.dtype.kind == "b": + isrgb = False + if isrgb and not ( + data.shape[-1] in (3, 4) or (data.ndim > 2 and data.shape[-3] in (3, 4)) + ): + isrgb = False + photometric = "MINISBLACK" + + data = data.squeeze() + if photometric in ("MINISWHITE", "MINISBLACK", None): + data = reshape_nd(data, 2) + else: + data = reshape_nd(data, 3) + + dims = data.ndim + if dims < 2: + raise ValueError("not an image") + elif dims == 2: + dims = 0 + isrgb = False + else: + if isrgb and data.shape[-3] in (3, 4): + data = numpy.swapaxes(data, -3, -2) + data = numpy.swapaxes(data, -2, -1) + elif not isrgb and ( + data.shape[-1] < data.shape[-2] // 8 + and data.shape[-1] < data.shape[-3] // 8 + and data.shape[-1] < 5 + ): + data = numpy.swapaxes(data, -3, -1) + data = numpy.swapaxes(data, -2, -1) + isrgb = isrgb and data.shape[-1] in (3, 4) + dims -= 3 if isrgb else 2 + + if isrgb: + data = data[..., :maxdim, :maxdim, :maxdim] + else: + data = data[..., :maxdim, :maxdim] + + if photometric == "PALETTE" and isrgb: + datamax = data.max() + if datamax > 255: + data = data >> 8 # possible precision loss + data = data.astype("B") + elif data.dtype.kind in "ui": + if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None: + try: + bitspersample = int(math.ceil(math.log(data.max(), 2))) + except Exception: + bitspersample = data.dtype.itemsize * 8 + elif not isinstance(bitspersample, inttypes): + # bitspersample can be tuple, e.g. 
(5, 6, 5) + bitspersample = data.dtype.itemsize * 8 + datamax = 2**bitspersample + if isrgb: + if bitspersample < 8: + data = data << (8 - bitspersample) + elif bitspersample > 8: + data = data >> (bitspersample - 8) # precision loss + data = data.astype("B") + elif data.dtype.kind == "f": + datamax = data.max() + if isrgb and datamax > 1.0: + if data.dtype.char == "d": + data = data.astype("f") + data /= datamax + else: + data = data / datamax + elif data.dtype.kind == "b": + datamax = 1 + elif data.dtype.kind == "c": + data = numpy.absolute(data) + datamax = data.max() + + if not isrgb: + if vmax is None: + vmax = datamax + if vmin is None: + if data.dtype.kind == "i": + dtmin = numpy.iinfo(data.dtype).min + vmin = numpy.min(data) + if vmin == dtmin: + vmin = numpy.min(data > dtmin) + if data.dtype.kind == "f": + dtmin = numpy.finfo(data.dtype).min + vmin = numpy.min(data) + if vmin == dtmin: + vmin = numpy.min(data > dtmin) + else: + vmin = 0 + + pyplot = sys.modules["matplotlib.pyplot"] + + if figure is None: + pyplot.rc("font", family="sans-serif", weight="normal", size=8) + figure = pyplot.figure( + dpi=dpi, figsize=(10.3, 6.3), frameon=True, facecolor="1.0", edgecolor="w" + ) + try: + figure.canvas.manager.window.title(title) + except Exception: + pass + size = len(title.splitlines()) if title else 1 + pyplot.subplots_adjust( + bottom=0.03 * (dims + 2), + top=0.98 - size * 0.03, + left=0.1, + right=0.95, + hspace=0.05, + wspace=0.0, + ) + subplot = pyplot.subplot(subplot) + + if title: + try: + title = unicode(title, "Windows-1252") + except TypeError: + pass + pyplot.title(title, size=11) + + if cmap is None: + if data.dtype.char == "?": + cmap = "gray" + elif data.dtype.kind in "buf" or vmin == 0: + cmap = "viridis" + else: + cmap = "coolwarm" + if photometric == "MINISWHITE": + cmap += "_r" + + image = pyplot.imshow( + numpy.atleast_2d(data[(0,) * dims].squeeze()), + vmin=vmin, + vmax=vmax, + cmap=cmap, + interpolation=interpolation, + **kwargs + ) + + if 
not isrgb: + pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05 + + def format_coord(x, y): + # callback function to format coordinate display in toolbar + x = int(x + 0.5) + y = int(y + 0.5) + try: + if dims: + return "%s @ %s [%4i, %4i]" % (curaxdat[1][y, x], current, y, x) + return "%s @ [%4i, %4i]" % (data[y, x], y, x) + except IndexError: + return "" + + def none(event): + return "" + + subplot.format_coord = format_coord + image.get_cursor_data = none + image.format_cursor_data = none + + if dims: + current = list((0,) * dims) + curaxdat = [0, data[tuple(current)].squeeze()] + sliders = [ + pyplot.Slider( + pyplot.axes([0.125, 0.03 * (axis + 1), 0.725, 0.025]), + "Dimension %i" % axis, + 0, + data.shape[axis] - 1, + 0, + facecolor="0.5", + valfmt="%%.0f [%i]" % data.shape[axis], + ) + for axis in range(dims) + ] + for slider in sliders: + slider.drawon = False + + def set_image(current, sliders=sliders, data=data): + # change image and redraw canvas + curaxdat[1] = data[tuple(current)].squeeze() + image.set_data(curaxdat[1]) + for ctrl, index in zip(sliders, current): + ctrl.eventson = False + ctrl.set_val(index) + ctrl.eventson = True + figure.canvas.draw() + + def on_changed(index, axis, data=data, current=current): + # callback function for slider change event + index = int(round(index)) + curaxdat[0] = axis + if index == current[axis]: + return + if index >= data.shape[axis]: + index = 0 + elif index < 0: + index = data.shape[axis] - 1 + current[axis] = index + set_image(current) + + def on_keypressed(event, data=data, current=current): + # callback function for key press event + key = event.key + axis = curaxdat[0] + if str(key) in "0123456789": + on_changed(key, axis) + elif key == "right": + on_changed(current[axis] + 1, axis) + elif key == "left": + on_changed(current[axis] - 1, axis) + elif key == "up": + curaxdat[0] = 0 if axis == len(data.shape) - 1 else axis + 1 + elif key == "down": + curaxdat[0] = len(data.shape) - 1 if axis == 0 else axis - 
1 + elif key == "end": + on_changed(data.shape[axis] - 1, axis) + elif key == "home": + on_changed(0, axis) + + figure.canvas.mpl_connect("key_press_event", on_keypressed) + for axis, ctrl in enumerate(sliders): + ctrl.on_changed(lambda k, a=axis: on_changed(k, a)) + + return figure, subplot, image + + +def _app_show(): + """Block the GUI. For use as skimage plugin.""" + pyplot = sys.modules["matplotlib.pyplot"] + pyplot.show() + + +def askopenfilename(**kwargs): + """Return file name(s) from Tkinter's file open dialog.""" + try: + from Tkinter import Tk + import tkFileDialog as filedialog + except ImportError: + from tkinter import Tk, filedialog + root = Tk() + root.withdraw() + root.update() + filenames = filedialog.askopenfilename(**kwargs) + root.destroy() + return filenames + + +def main(argv=None): + """Command line usage main function.""" + if float(sys.version[0:3]) < 2.7: + print("This script requires Python version 2.7 or better.") + print("This is Python version %s" % sys.version) + return 0 + if argv is None: + argv = sys.argv + + import optparse # TODO: use argparse + + parser = optparse.OptionParser( + usage="usage: %prog [options] path", + description="Display image data in TIFF files.", + version="%%prog %s" % __version__, + ) + opt = parser.add_option + opt("-p", "--page", dest="page", type="int", default=-1, help="display single page") + opt( + "-s", + "--series", + dest="series", + type="int", + default=-1, + help="display series of pages of same shape", + ) + opt( + "--nomultifile", + dest="nomultifile", + action="store_true", + default=False, + help="do not read OME series from multiple files", + ) + opt( + "--noplots", + dest="noplots", + type="int", + default=8, + help="maximum number of plots", + ) + opt( + "--interpol", + dest="interpol", + metavar="INTERPOL", + default="bilinear", + help="image interpolation method", + ) + opt("--dpi", dest="dpi", type="int", default=96, help="plot resolution") + opt( + "--vmin", + dest="vmin", + 
type="int", + default=None, + help="minimum value for colormapping", + ) + opt( + "--vmax", + dest="vmax", + type="int", + default=None, + help="maximum value for colormapping", + ) + opt( + "--debug", + dest="debug", + action="store_true", + default=False, + help="raise exception on failures", + ) + opt( + "--doctest", + dest="doctest", + action="store_true", + default=False, + help="runs the docstring examples", + ) + opt("-v", "--detail", dest="detail", type="int", default=2) + opt("-q", "--quiet", dest="quiet", action="store_true") + + settings, path = parser.parse_args() + path = " ".join(path) + + if settings.doctest: + import doctest + + doctest.testmod(optionflags=doctest.ELLIPSIS) + return 0 + if not path: + path = askopenfilename( + title="Select a TIFF file", filetypes=TIFF.FILEOPEN_FILTER + ) + if not path: + parser.error("No file specified") + + if any(i in path for i in "?*"): + path = glob.glob(path) + if not path: + print("no files match the pattern") + return 0 + # TODO: handle image sequences + path = path[0] + + if not settings.quiet: + print("\nReading file structure...", end=" ") + start = time.time() + try: + tif = TiffFile(path, multifile=not settings.nomultifile) + except Exception as e: + if settings.debug: + raise + else: + print("\n", e) + sys.exit(0) + if not settings.quiet: + print("%.3f ms" % ((time.time() - start) * 1e3)) + + if tif.is_ome: + settings.norgb = True + + images = [] + if settings.noplots > 0: + if not settings.quiet: + print("Reading image data... 
", end=" ") + + def notnone(x): + return next(i for i in x if i is not None) + + start = time.time() + try: + if settings.page >= 0: + images = [(tif.asarray(key=settings.page), tif[settings.page], None)] + elif settings.series >= 0: + images = [ + ( + tif.asarray(series=settings.series), + notnone(tif.series[settings.series]._pages), + tif.series[settings.series], + ) + ] + else: + images = [] + for i, s in enumerate(tif.series[: settings.noplots]): + try: + images.append( + (tif.asarray(series=i), notnone(s._pages), tif.series[i]) + ) + except ValueError as e: + images.append((None, notnone(s.pages), None)) + if settings.debug: + raise + else: + print("\nSeries %i failed: %s... " % (i, e), end="") + if not settings.quiet: + print("%.3f ms" % ((time.time() - start) * 1e3)) + except Exception as e: + if settings.debug: + raise + else: + print(e) + + if not settings.quiet: + print() + print(TiffFile.__str__(tif, detail=int(settings.detail))) + print() + tif.close() + + if images and settings.noplots > 0: + try: + import matplotlib + + matplotlib.use("TkAgg") + from matplotlib import pyplot + except ImportError as e: + warnings.warn("failed to import matplotlib.\n%s" % e) + else: + for img, page, series in images: + if img is None: + continue + vmin, vmax = settings.vmin, settings.vmax + if "GDAL_NODATA" in page.tags: + try: + vmin = numpy.min( + img[img > float(page.tags["GDAL_NODATA"].value)] + ) + except ValueError: + pass + if tif.is_stk: + try: + vmin = tif.stk_metadata["MinScale"] + vmax = tif.stk_metadata["MaxScale"] + except KeyError: + pass + else: + if vmax <= vmin: + vmin, vmax = settings.vmin, settings.vmax + if series: + title = "%s\n%s\n%s" % (str(tif), str(page), str(series)) + else: + title = "%s\n %s" % (str(tif), str(page)) + photometric = "MINISBLACK" + if page.photometric not in (3,): + photometric = TIFF.PHOTOMETRIC(page.photometric).name + imshow( + img, + title=title, + vmin=vmin, + vmax=vmax, + bitspersample=page.bitspersample, + 
photometric=photometric, + interpolation=settings.interpol, + dpi=settings.dpi, + ) + pyplot.show() + + +if sys.version_info[0] == 2: + inttypes = int, long # noqa + + def print_(*args, **kwargs): + """Print function with flush support.""" + flush = kwargs.pop("flush", False) + print(*args, **kwargs) + if flush: + sys.stdout.flush() + + def bytes2str(b, encoding=None, errors=None): + """Return string from bytes.""" + return b + + def str2bytes(s, encoding=None): + """Return bytes from string.""" + return s + + def byte2int(b): + """Return value of byte as int.""" + return ord(b) + + class FileNotFoundError(IOError): + pass + + TiffFrame = TiffPage # noqa +else: + inttypes = int + basestring = str, bytes + unicode = str + print_ = print + + def bytes2str(b, encoding=None, errors="strict"): + """Return unicode string from encoded bytes.""" + if encoding is not None: + return b.decode(encoding, errors) + try: + return b.decode("utf-8", errors) + except UnicodeDecodeError: + return b.decode("cp1252", errors) + + def str2bytes(s, encoding="cp1252"): + """Return bytes from unicode string.""" + return s.encode(encoding) + + def byte2int(b): + """Return value of byte as int.""" + return b + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/bsdf.py b/parrot/lib/python3.10/site-packages/imageio/plugins/bsdf.py new file mode 100644 index 0000000000000000000000000000000000000000..041d7e524366d312516b637481f51cd799d74d37 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/bsdf.py @@ -0,0 +1,324 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Read/Write BSDF files. + +Backend Library: internal + +The BSDF format enables reading and writing of image data in the +BSDF serialization format. This format allows storage of images, volumes, +and series thereof. Data can be of any numeric data type, and can +optionally be compressed. 
Each image/volume can have associated +meta data, which can consist of any data type supported by BSDF. + +By default, image data is lazily loaded; the actual image data is +not read until it is requested. This allows storing multiple images +in a single file and still have fast access to individual images. +Alternatively, a series of images can be read in streaming mode, reading +images as they are read (e.g. from http). + +BSDF is a simple generic binary format. It is easy to extend and there +are standard extension definitions for 2D and 3D image data. +Read more at http://bsdf.io. + + +Parameters +---------- +random_access : bool + Whether individual images in the file can be read in random order. + Defaults to True for normal files, and to False when reading from HTTP. + If False, the file is read in "streaming mode", allowing reading + files as they are read, but without support for "rewinding". + Note that setting this to True when reading from HTTP, the whole file + is read upon opening it (since lazy loading is not possible over HTTP). + +compression : int + Use ``0`` or "no" for no compression, ``1`` or "zlib" for Zlib + compression (same as zip files and PNG), and ``2`` or "bz2" for Bz2 + compression (more compact but slower). Default 1 (zlib). + Note that some BSDF implementations may not support compression + (e.g. JavaScript). + +""" + +import numpy as np + +from ..core import Format + + +def get_bsdf_serializer(options): + from . 
import _bsdf as bsdf + + class NDArrayExtension(bsdf.Extension): + """Copy of BSDF's NDArrayExtension but deal with lazy blobs.""" + + name = "ndarray" + cls = np.ndarray + + def encode(self, s, v): + return dict(shape=v.shape, dtype=str(v.dtype), data=v.tobytes()) + + def decode(self, s, v): + return v # return as dict, because of lazy blobs, decode in Image + + class ImageExtension(bsdf.Extension): + """We implement two extensions that trigger on the Image classes.""" + + def encode(self, s, v): + return dict(array=v.array, meta=v.meta) + + def decode(self, s, v): + return Image(v["array"], v["meta"]) + + class Image2DExtension(ImageExtension): + name = "image2d" + cls = Image2D + + class Image3DExtension(ImageExtension): + name = "image3d" + cls = Image3D + + exts = [NDArrayExtension, Image2DExtension, Image3DExtension] + serializer = bsdf.BsdfSerializer(exts, **options) + + return bsdf, serializer + + +class Image: + """Class in which we wrap the array and meta data. By using an extension + we can make BSDF trigger on these classes and thus encode the images. + as actual images. + """ + + def __init__(self, array, meta): + self.array = array + self.meta = meta + + def get_array(self): + if not isinstance(self.array, np.ndarray): + v = self.array + blob = v["data"] + if not isinstance(blob, bytes): # then it's a lazy bsdf.Blob + blob = blob.get_bytes() + self.array = np.frombuffer(blob, dtype=v["dtype"]) + self.array.shape = v["shape"] + return self.array + + def get_meta(self): + return self.meta + + +class Image2D(Image): + pass + + +class Image3D(Image): + pass + + +class BsdfFormat(Format): + """The BSDF format enables reading and writing of image data in the + BSDF serialization format. This format allows storage of images, volumes, + and series thereof. Data can be of any numeric data type, and can + optionally be compressed. Each image/volume can have associated + meta data, which can consist of any data type supported by BSDF. 
+ + By default, image data is lazily loaded; the actual image data is + not read until it is requested. This allows storing multiple images + in a single file and still have fast access to individual images. + Alternatively, a series of images can be read in streaming mode, reading + images as they are read (e.g. from http). + + BSDF is a simple generic binary format. It is easy to extend and there + are standard extension definitions for 2D and 3D image data. + Read more at http://bsdf.io. + + Parameters for reading + ---------------------- + random_access : bool + Whether individual images in the file can be read in random order. + Defaults to True for normal files, and to False when reading from HTTP. + If False, the file is read in "streaming mode", allowing reading + files as they are read, but without support for "rewinding". + Note that setting this to True when reading from HTTP, the whole file + is read upon opening it (since lazy loading is not possible over HTTP). + + Parameters for saving + --------------------- + compression : {0, 1, 2} + Use ``0`` or "no" for no compression, ``1`` or "zlib" for Zlib + compression (same as zip files and PNG), and ``2`` or "bz2" for Bz2 + compression (more compact but slower). Default 1 (zlib). + Note that some BSDF implementations may not support compression + (e.g. JavaScript). + + """ + + def _can_read(self, request): + if request.mode[1] in (self.modes + "?"): + # if request.extension in self.extensions: + # return True + if request.firstbytes.startswith(b"BSDF"): + return True + + def _can_write(self, request): + if request.mode[1] in (self.modes + "?"): + if request.extension in self.extensions: + return True + + # -- reader + + class Reader(Format.Reader): + def _open(self, random_access=None): + # Validate - we need a BSDF file consisting of a list of images + # The list is typically a stream, but does not have to be. 
+ assert self.request.firstbytes[:4] == b"BSDF", "Not a BSDF file" + # self.request.firstbytes[5:6] == major and minor version + if not ( + self.request.firstbytes[6:15] == b"M\x07image2D" + or self.request.firstbytes[6:15] == b"M\x07image3D" + or self.request.firstbytes[6:7] == b"l" + ): + pass # Actually, follow a more duck-type approach ... + # raise RuntimeError('BSDF file does not look like an ' + # 'image container.') + # Set options. If we think that seeking is allowed, we lazily load + # blobs, and set streaming to False (i.e. the whole file is read, + # but we skip over binary blobs), so that we subsequently allow + # random access to the images. + # If seeking is not allowed (e.g. with a http request), we cannot + # lazily load blobs, but we can still load streaming from the web. + options = {} + if self.request.filename.startswith(("http://", "https://")): + ra = False if random_access is None else bool(random_access) + options["lazy_blob"] = False # Because we cannot seek now + options["load_streaming"] = not ra # Load as a stream? + else: + ra = True if random_access is None else bool(random_access) + options["lazy_blob"] = ra # Don't read data until needed + options["load_streaming"] = not ra + + file = self.request.get_file() + bsdf, self._serializer = get_bsdf_serializer(options) + self._stream = self._serializer.load(file) + # Another validation + if ( + isinstance(self._stream, dict) + and "meta" in self._stream + and "array" in self._stream + ): + self._stream = Image(self._stream["array"], self._stream["meta"]) + if not isinstance(self._stream, (Image, list, bsdf.ListStream)): + raise RuntimeError( + "BSDF file does not look seem to have an " "image container." 
+ ) + + def _close(self): + pass + + def _get_length(self): + if isinstance(self._stream, Image): + return 1 + elif isinstance(self._stream, list): + return len(self._stream) + elif self._stream.count < 0: + return np.inf + return self._stream.count + + def _get_data(self, index): + # Validate + if index < 0 or index >= self.get_length(): + raise IndexError( + "Image index %i not in [0 %i]." % (index, self.get_length()) + ) + # Get Image object + if isinstance(self._stream, Image): + image_ob = self._stream # singleton + elif isinstance(self._stream, list): + # Easy when we have random access + image_ob = self._stream[index] + else: + # For streaming, we need to skip over frames + if index < self._stream.index: + raise IndexError( + "BSDF file is being read in streaming " + "mode, thus does not allow rewinding." + ) + while index > self._stream.index: + self._stream.next() + image_ob = self._stream.next() # Can raise StopIteration + # Is this an image? + if ( + isinstance(image_ob, dict) + and "meta" in image_ob + and "array" in image_ob + ): + image_ob = Image(image_ob["array"], image_ob["meta"]) + if isinstance(image_ob, Image): + # Return as array (if we have lazy blobs, they are read now) + return image_ob.get_array(), image_ob.get_meta() + else: + r = repr(image_ob) + r = r if len(r) < 200 else r[:197] + "..." 
+ raise RuntimeError("BSDF file contains non-image " + r) + + def _get_meta_data(self, index): # pragma: no cover + return {} # This format does not support global meta data + + # -- writer + + class Writer(Format.Writer): + def _open(self, compression=1): + options = {"compression": compression} + bsdf, self._serializer = get_bsdf_serializer(options) + if self.request.mode[1] in "iv": + self._stream = None # Singleton image + self._written = False + else: + # Series (stream) of images + file = self.request.get_file() + self._stream = bsdf.ListStream() + self._serializer.save(file, self._stream) + + def _close(self): + # We close the stream here, which will mark the number of written + # elements. If we would not close it, the file would be fine, it's + # just that upon reading it would not be known how many items are + # in there. + if self._stream is not None: + self._stream.close(False) # False says "keep this a stream" + + def _append_data(self, im, meta): + # Determine dimension + ndim = None + if self.request.mode[1] in "iI": + ndim = 2 + elif self.request.mode[1] in "vV": + ndim = 3 + else: + ndim = 3 # Make an educated guess + if im.ndim == 2 or (im.ndim == 3 and im.shape[-1] <= 4): + ndim = 2 + # Validate shape + assert ndim in (2, 3) + if ndim == 2: + assert im.ndim == 2 or (im.ndim == 3 and im.shape[-1] <= 4) + else: + assert im.ndim == 3 or (im.ndim == 4 and im.shape[-1] <= 4) + # Wrap data and meta data in our special class that will trigger + # the BSDF image2D or image3D extension. 
+ if ndim == 2: + ob = Image2D(im, meta) + else: + ob = Image3D(im, meta) + # Write directly or to stream + if self._stream is None: + assert not self._written, "Cannot write singleton image twice" + self._written = True + file = self.request.get_file() + self._serializer.save(file, ob) + else: + self._stream.append(ob) + + def set_meta_data(self, meta): # pragma: no cover + raise RuntimeError("The BSDF format only supports " "per-image meta data.") diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/dicom.py b/parrot/lib/python3.10/site-packages/imageio/plugins/dicom.py new file mode 100644 index 0000000000000000000000000000000000000000..c5f366449c9a707ad792cbe61c3e46f343e665ff --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/dicom.py @@ -0,0 +1,333 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +"""Read DICOM files. + +Backend Library: internal + +A format for reading DICOM images: a common format used to store +medical image data, such as X-ray, CT and MRI. + +This format borrows some code (and ideas) from the pydicom project. However, +only a predefined subset of tags are extracted from the file. This allows +for great simplifications allowing us to make a stand-alone reader, and +also results in a much faster read time. + +By default, only uncompressed and deflated transfer syntaxes are supported. +If gdcm or dcmtk is installed, these will be used to automatically convert +the data. See https://github.com/malaterre/GDCM/releases for installing GDCM. + +This format provides functionality to group images of the same +series together, thus extracting volumes (and multiple volumes). +Using volread will attempt to yield a volume. If multiple volumes +are present, the first one is given. Using mimread will simply yield +all images in the given directory (not taking series into account). 
+ +Parameters +---------- +progress : {True, False, BaseProgressIndicator} + Whether to show progress when reading from multiple files. + Default True. By passing an object that inherits from + BaseProgressIndicator, the way in which progress is reported + can be costumized. + +""" + +# todo: Use pydicom: +# * Note: is not py3k ready yet +# * Allow reading the full meta info +# I think we can more or less replace the SimpleDicomReader with a +# pydicom.Dataset For series, only ned to read the full info from one +# file: speed still high +# * Perhaps allow writing? + +import os +import sys +import logging +import subprocess + +from ..core import Format, BaseProgressIndicator, StdoutProgressIndicator +from ..core import read_n_bytes + +_dicom = None # lazily loaded in load_lib() + +logger = logging.getLogger(__name__) + + +def load_lib(): + global _dicom + from . import _dicom + + return _dicom + + +# Determine endianity of system +sys_is_little_endian = sys.byteorder == "little" + + +def get_dcmdjpeg_exe(): + fname = "dcmdjpeg" + ".exe" * sys.platform.startswith("win") + for dir in ( + "c:\\dcmtk", + "c:\\Program Files", + "c:\\Program Files\\dcmtk", + "c:\\Program Files (x86)\\dcmtk", + ): + filename = os.path.join(dir, fname) + if os.path.isfile(filename): + return [filename] + + try: + subprocess.check_call([fname, "--version"]) + return [fname] + except Exception: + return None + + +def get_gdcmconv_exe(): + fname = "gdcmconv" + ".exe" * sys.platform.startswith("win") + # Maybe it's on the path + try: + subprocess.check_call([fname, "--version"]) + return [fname, "--raw"] + except Exception: + pass + # Select directories where it could be + candidates = [] + base_dirs = [r"c:\Program Files"] + for base_dir in base_dirs: + if os.path.isdir(base_dir): + for dname in os.listdir(base_dir): + if dname.lower().startswith("gdcm"): + suffix = dname[4:].strip() + candidates.append((suffix, os.path.join(base_dir, dname))) + # Sort, so higher versions are tried earlier + 
candidates.sort(reverse=True) + # Select executable + filename = None + for _, dirname in candidates: + exe1 = os.path.join(dirname, "gdcmconv.exe") + exe2 = os.path.join(dirname, "bin", "gdcmconv.exe") + if os.path.isfile(exe1): + filename = exe1 + break + if os.path.isfile(exe2): + filename = exe2 + break + else: + return None + return [filename, "--raw"] + + +class DicomFormat(Format): + """See :mod:`imageio.plugins.dicom`""" + + def _can_read(self, request): + # If user URI was a directory, we check whether it has a DICOM file + if os.path.isdir(request.filename): + files = os.listdir(request.filename) + for fname in sorted(files): # Sorting make it consistent + filename = os.path.join(request.filename, fname) + if os.path.isfile(filename) and "DICOMDIR" not in fname: + with open(filename, "rb") as f: + first_bytes = read_n_bytes(f, 140) + return first_bytes[128:132] == b"DICM" + else: + return False + # Check + return request.firstbytes[128:132] == b"DICM" + + def _can_write(self, request): + # We cannot save yet. May be possible if we will used pydicom as + # a backend. + return False + + # -- + + class Reader(Format.Reader): + _compressed_warning_dirs = set() + + def _open(self, progress=True): + if not _dicom: + load_lib() + if os.path.isdir(self.request.filename): + # A dir can be given if the user used the format explicitly + self._info = {} + self._data = None + else: + # Read the given dataset now ... + try: + dcm = _dicom.SimpleDicomReader(self.request.get_file()) + except _dicom.CompressedDicom as err: + # We cannot do this on our own. Perhaps with some help ... 
+ cmd = get_gdcmconv_exe() + if not cmd and "JPEG" in str(err): + cmd = get_dcmdjpeg_exe() + if not cmd: + msg = err.args[0].replace("using", "installing") + msg = msg.replace("convert", "auto-convert") + err.args = (msg,) + raise + else: + fname1 = self.request.get_local_filename() + fname2 = fname1 + ".raw" + try: + subprocess.check_call(cmd + [fname1, fname2]) + except Exception: + raise err + d = os.path.dirname(fname1) + if d not in self._compressed_warning_dirs: + self._compressed_warning_dirs.add(d) + logger.warning( + "DICOM file contained compressed data. " + + "Autoconverting with " + + cmd[0] + + " (this warning is shown once for each directory)" + ) + dcm = _dicom.SimpleDicomReader(fname2) + + self._info = dcm._info + self._data = dcm.get_numpy_array() + + # Initialize series, list of DicomSeries objects + self._series = None # only created if needed + + # Set progress indicator + if isinstance(progress, BaseProgressIndicator): + self._progressIndicator = progress + elif progress is True: + p = StdoutProgressIndicator("Reading DICOM") + self._progressIndicator = p + elif progress in (None, False): + self._progressIndicator = BaseProgressIndicator("Dummy") + else: + raise ValueError("Invalid value for progress.") + + def _close(self): + # Clean up + self._info = None + self._data = None + self._series = None + + @property + def series(self): + if self._series is None: + pi = self._progressIndicator + self._series = _dicom.process_directory(self.request, pi) + return self._series + + def _get_length(self): + if self._data is None: + dcm = self.series[0][0] + self._info = dcm._info + self._data = dcm.get_numpy_array() + + nslices = self._data.shape[0] if (self._data.ndim == 3) else 1 + + if self.request.mode[1] == "i": + # User expects one, but lets be honest about this file + return nslices + elif self.request.mode[1] == "I": + # User expects multiple, if this file has multiple slices, ok. + # Otherwise we have to check the series. 
+ if nslices > 1: + return nslices + else: + return sum([len(serie) for serie in self.series]) + elif self.request.mode[1] == "v": + # User expects a volume, if this file has one, ok. + # Otherwise we have to check the series + if nslices > 1: + return 1 + else: + return len(self.series) # We assume one volume per series + elif self.request.mode[1] == "V": + # User expects multiple volumes. We have to check the series + return len(self.series) # We assume one volume per series + else: + raise RuntimeError("DICOM plugin should know what to expect.") + + def _get_slice_data(self, index): + nslices = self._data.shape[0] if (self._data.ndim == 3) else 1 + + # Allow index >1 only if this file contains >1 + if nslices > 1: + return self._data[index], self._info + elif index == 0: + return self._data, self._info + else: + raise IndexError("Dicom file contains only one slice.") + + def _get_data(self, index): + if self._data is None: + dcm = self.series[0][0] + self._info = dcm._info + self._data = dcm.get_numpy_array() + + nslices = self._data.shape[0] if (self._data.ndim == 3) else 1 + + if self.request.mode[1] == "i": + return self._get_slice_data(index) + elif self.request.mode[1] == "I": + # Return slice from volume, or return item from series + if index == 0 and nslices > 1: + return self._data[index], self._info + else: + L = [] + for serie in self.series: + L.extend([dcm_ for dcm_ in serie]) + return L[index].get_numpy_array(), L[index].info + elif self.request.mode[1] in "vV": + # Return volume or series + if index == 0 and nslices > 1: + return self._data, self._info + else: + return ( + self.series[index].get_numpy_array(), + self.series[index].info, + ) + # mode is `?` (typically because we are using V3). If there is a + # series (multiple files), index referrs to the element of the + # series and we read volumes. If there is no series, index + # referrs to the slice in the volume we read "flat" images. 
+ elif len(self.series) > 1: + # mode is `?` and there are multiple series. Each series is a ndimage. + return ( + self.series[index].get_numpy_array(), + self.series[index].info, + ) + else: + # mode is `?` and there is only one series. Each slice is an ndimage. + return self._get_slice_data(index) + + def _get_meta_data(self, index): + if self._data is None: + dcm = self.series[0][0] + self._info = dcm._info + self._data = dcm.get_numpy_array() + + nslices = self._data.shape[0] if (self._data.ndim == 3) else 1 + + # Default is the meta data of the given file, or the "first" file. + if index is None: + return self._info + + if self.request.mode[1] == "i": + return self._info + elif self.request.mode[1] == "I": + # Return slice from volume, or return item from series + if index == 0 and nslices > 1: + return self._info + else: + L = [] + for serie in self.series: + L.extend([dcm_ for dcm_ in serie]) + return L[index].info + elif self.request.mode[1] in "vV": + # Return volume or series + if index == 0 and nslices > 1: + return self._info + else: + return self.series[index].info + else: # pragma: no cover + raise ValueError("DICOM plugin should know what to expect.") diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/example.py b/parrot/lib/python3.10/site-packages/imageio/plugins/example.py new file mode 100644 index 0000000000000000000000000000000000000000..b7cf8b9b3d01229c71cacf1cb86c920960aa6ec4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/example.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Example plugin. You can use this as a template for your own plugin. +""" + +import numpy as np + +from .. import formats +from ..core import Format + + +class DummyFormat(Format): + """The dummy format is an example format that does nothing. + It will never indicate that it can read or write a file. 
When + explicitly asked to read, it will simply read the bytes. When + explicitly asked to write, it will raise an error. + + This documentation is shown when the user does ``help('thisformat')``. + + Parameters for reading + ---------------------- + Specify arguments in numpy doc style here. + + Parameters for saving + --------------------- + Specify arguments in numpy doc style here. + + """ + + def _can_read(self, request): + # This method is called when the format manager is searching + # for a format to read a certain image. Return True if this format + # can do it. + # + # The format manager is aware of the extensions and the modes + # that each format can handle. It will first ask all formats + # that *seem* to be able to read it whether they can. If none + # can, it will ask the remaining formats if they can: the + # extension might be missing, and this allows formats to provide + # functionality for certain extensions, while giving preference + # to other plugins. + # + # If a format says it can, it should live up to it. The format + # would ideally check the request.firstbytes and look for a + # header of some kind. + # + # The request object has: + # request.filename: a representation of the source (only for reporting) + # request.firstbytes: the first 256 bytes of the file. + # request.mode[0]: read or write mode + + if request.extension in self.extensions: + return True + + def _can_write(self, request): + # This method is called when the format manager is searching + # for a format to write a certain image. It will first ask all + # formats that *seem* to be able to write it whether they can. + # If none can, it will ask the remaining formats if they can. + # + # Return True if the format can do it. + + # In most cases, this code does suffice: + if request.extension in self.extensions: + return True + + # -- reader + + class Reader(Format.Reader): + def _open(self, some_option=False, length=1): + # Specify kwargs here. 
Optionally, the user-specified kwargs + # can also be accessed via the request.kwargs object. + # + # The request object provides two ways to get access to the + # data. Use just one: + # - Use request.get_file() for a file object (preferred) + # - Use request.get_local_filename() for a file on the system + self._fp = self.request.get_file() + self._length = length # passed as an arg in this case for testing + self._data = None + + def _close(self): + # Close the reader. + # Note that the request object will close self._fp + pass + + def _get_length(self): + # Return the number of images. Can be np.inf + return self._length + + def _get_data(self, index): + # Return the data and meta data for the given index + if index >= self._length: + raise IndexError("Image index %i > %i" % (index, self._length)) + # Read all bytes + if self._data is None: + self._data = self._fp.read() + # Put in a numpy array + im = np.frombuffer(self._data, "uint8") + im.shape = len(im), 1 + # Return array and dummy meta data + return im, {} + + def _get_meta_data(self, index): + # Get the meta data for the given index. If index is None, it + # should return the global meta data. + return {} # This format does not support meta data + + # -- writer + + class Writer(Format.Writer): + def _open(self, flags=0): + # Specify kwargs here. Optionally, the user-specified kwargs + # can also be accessed via the request.kwargs object. + # + # The request object provides two ways to write the data. + # Use just one: + # - Use request.get_file() for a file object (preferred) + # - Use request.get_local_filename() for a file on the system + self._fp = self.request.get_file() + + def _close(self): + # Close the reader. + # Note that the request object will close self._fp + pass + + def _append_data(self, im, meta): + # Process the given data and meta data. 
+ raise RuntimeError("The dummy format cannot write image data.") + + def set_meta_data(self, meta): + # Process the given meta data (global for all images) + # It is not mandatory to support this. + raise RuntimeError("The dummy format cannot write meta data.") + + +# Register. You register an *instance* of a Format class. Here specify: +format = DummyFormat( + "dummy", # short name + "An example format that does nothing.", # one line descr. + ".foobar .nonexistentext", # list of extensions + "iI", # modes, characters in iIvV +) +formats.add_format(format) diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/feisem.py b/parrot/lib/python3.10/site-packages/imageio/plugins/feisem.py new file mode 100644 index 0000000000000000000000000000000000000000..af50768a3c5a904bd5fde6ea98feb942fba4dc10 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/feisem.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +"""Read TIFF from FEI SEM microscopes. + +Backend Library: internal + +This format is based on :mod:`TIFF `, and supports the +same parameters. FEI microscopes append metadata as ASCII text at the end of the +file, which this reader correctly extracts. + +Parameters +---------- +discard_watermark : bool + If True (default), discard the bottom rows of the image, which + contain no image data, only a watermark with metadata. +watermark_height : int + The height in pixels of the FEI watermark. The default is 70. + +See Also +-------- + :mod:`imageio.plugins.tifffile` + +""" + + +from .tifffile import TiffFormat + + +class FEISEMFormat(TiffFormat): + """See :mod:`imageio.plugins.feisem`""" + + def _can_write(self, request): + return False # FEI-SEM only supports reading + + class Reader(TiffFormat.Reader): + def _get_data(self, index=0, discard_watermark=True, watermark_height=70): + """Get image and metadata from given index. + + FEI images usually (always?) 
contain a watermark at the + bottom of the image, 70 pixels high. We discard this by + default as it does not contain any information not present + in the metadata. + """ + im, meta = super(FEISEMFormat.Reader, self)._get_data(index) + if discard_watermark: + im = im[:-watermark_height] + return im, meta + + def _get_meta_data(self, index=None): + """Read the metadata from an FEI SEM TIFF. + + This metadata is included as ASCII text at the end of the file. + + The index, if provided, is ignored. + + Returns + ------- + metadata : dict + Dictionary of metadata. + """ + if hasattr(self, "_fei_meta"): + return self._fei_meta + + md = {"root": {}} + current_tag = "root" + reading_metadata = False + filename = self.request.get_local_filename() + with open(filename, encoding="utf8", errors="ignore") as fin: + for line in fin: + if not reading_metadata: + if not line.startswith("Date="): + continue + else: + reading_metadata = True + line = line.rstrip() + if line.startswith("["): + current_tag = line.lstrip("[").rstrip("]") + md[current_tag] = {} + else: + if "=" in line: # ignore empty and irrelevant lines + key, val = line.split("=", maxsplit=1) + for tag_type in (int, float): + try: + val = tag_type(val) + except ValueError: + continue + else: + break + md[current_tag][key] = val + if not md["root"] and len(md) == 1: + raise ValueError("Input file %s contains no FEI metadata." % filename) + + self._fei_meta = md + return md diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/ffmpeg.py b/parrot/lib/python3.10/site-packages/imageio/plugins/ffmpeg.py new file mode 100644 index 0000000000000000000000000000000000000000..ce47323b517c8949762515afd3fee3dff4dca90e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/ffmpeg.py @@ -0,0 +1,729 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +"""Read/Write video using FFMPEG + +.. 
note:: + We are in the process of (slowly) replacing this plugin with a new one that + is based on `pyav `_. It is faster and more + flexible than the plugin documented here. Check the :mod:`pyav + plugin's documentation ` for more information about + this plugin. + +Backend Library: https://github.com/imageio/imageio-ffmpeg + +.. note:: + To use this plugin you have to install its backend:: + + pip install imageio[ffmpeg] + + +The ffmpeg format provides reading and writing for a wide range of movie formats +such as .avi, .mpeg, .mp4, etc. as well as the ability to read streams from +webcams and USB cameras. It is based on ffmpeg and is inspired by/based `moviepy +`_ by Zulko. + +Parameters for reading +---------------------- +fps : scalar + The number of frames per second of the input stream. Default None (i.e. + read at the file's native fps). One can use this for files with a + variable fps, or in cases where imageio is unable to correctly detect + the fps. In case of trouble opening camera streams, it may help to set an + explicit fps value matching a framerate supported by the camera. +loop : bool + If True, the video will rewind as soon as a frame is requested + beyond the last frame. Otherwise, IndexError is raised. Default False. + Setting this to True will internally call ``count_frames()``, + and set the reader's length to that value instead of inf. +size : str | tuple + The frame size (i.e. resolution) to read the images, e.g. + (100, 100) or "640x480". For camera streams, this allows setting + the capture resolution. For normal video data, ffmpeg will + rescale the data. +dtype : str | type + The dtype for the output arrays. Determines the bit-depth that + is requested from ffmpeg. Supported dtypes: uint8, uint16. + Default: uint8. +pixelformat : str + The pixel format for the camera to use (e.g. "yuyv422" or + "gray"). The camera needs to support the format in order for + this to take effect. 
Note that the images produced by this + reader are always RGB. +input_params : list + List additional arguments to ffmpeg for input file options. + (Can also be provided as ``ffmpeg_params`` for backwards compatibility) + Example ffmpeg arguments to use aggressive error handling: + ['-err_detect', 'aggressive'] +output_params : list + List additional arguments to ffmpeg for output file options (i.e. the + stream being read by imageio). +print_info : bool + Print information about the video file as reported by ffmpeg. + +Parameters for writing +---------------------- +fps : scalar + The number of frames per second. Default 10. +codec : str + the video codec to use. Default 'libx264', which represents the + widely available mpeg4. Except when saving .wmv files, then the + defaults is 'msmpeg4' which is more commonly supported for windows +quality : float | None + Video output quality. Default is 5. Uses variable bit rate. Highest + quality is 10, lowest is 0. Set to None to prevent variable bitrate + flags to FFMPEG so you can manually specify them using output_params + instead. Specifying a fixed bitrate using 'bitrate' disables this + parameter. +bitrate : int | None + Set a constant bitrate for the video encoding. Default is None causing + 'quality' parameter to be used instead. Better quality videos with + smaller file sizes will result from using the 'quality' variable + bitrate parameter rather than specifying a fixed bitrate with this + parameter. +pixelformat: str + The output video pixel format. Default is 'yuv420p' which most widely + supported by video players. +input_params : list + List additional arguments to ffmpeg for input file options (i.e. the + stream that imageio provides). +output_params : list + List additional arguments to ffmpeg for output file options. 
+ (Can also be provided as ``ffmpeg_params`` for backwards compatibility) + Example ffmpeg arguments to use only intra frames and set aspect ratio: + ['-intra', '-aspect', '16:9'] +ffmpeg_log_level: str + Sets ffmpeg output log level. Default is "warning". + Values can be "quiet", "panic", "fatal", "error", "warning", "info" + "verbose", or "debug". Also prints the FFMPEG command being used by + imageio if "info", "verbose", or "debug". +macro_block_size: int + Size constraint for video. Width and height, must be divisible by this + number. If not divisible by this number imageio will tell ffmpeg to + scale the image up to the next closest size + divisible by this number. Most codecs are compatible with a macroblock + size of 16 (default), some can go smaller (4, 8). To disable this + automatic feature set it to None or 1, however be warned many players + can't decode videos that are odd in size and some codecs will produce + poor results or fail. See https://en.wikipedia.org/wiki/Macroblock. +audio_path : str | None + Audio path of any audio that needs to be written. Defaults to nothing, + so no audio will be written. Please note, when writing shorter video + than the original, ffmpeg will not truncate the audio track; it + will maintain its original length and be longer than the video. +audio_codec : str | None + The audio codec to use. Defaults to nothing, but if an audio_path has + been provided ffmpeg will attempt to set a default codec. + +Notes +----- +If you are using anaconda and ``anaconda/ffmpeg`` you will not be able to +encode/decode H.264 (likely due to licensing concerns). If you need this +format on anaconda install ``conda-forge/ffmpeg`` instead. + +You can use the ``IMAGEIO_FFMPEG_EXE`` environment variable to force using a +specific ffmpeg executable. 
+ +To get the number of frames before having read them all, you can use the +``reader.count_frames()`` method (the reader will then use +``imageio_ffmpeg.count_frames_and_secs()`` to get the exact number of frames, +note that this operation can take a few seconds on large files). Alternatively, +the number of frames can be estimated from the fps and duration in the meta data +(though these values themselves are not always present/reliable). + +""" + +import re +import sys +import time +import logging +import platform +import threading +import subprocess as sp +import imageio_ffmpeg + +import numpy as np + +from ..core import Format, image_as_uint + +logger = logging.getLogger(__name__) + +# Get camera format +if sys.platform.startswith("win"): + CAM_FORMAT = "dshow" # dshow or vfwcap +elif sys.platform.startswith("linux"): + CAM_FORMAT = "video4linux2" +elif sys.platform.startswith("darwin"): + CAM_FORMAT = "avfoundation" +else: # pragma: no cover + CAM_FORMAT = "unknown-cam-format" + + +def download(directory=None, force_download=False): # pragma: no cover + raise RuntimeError( + "imageio.ffmpeg.download() has been deprecated. " + "Use 'pip install imageio-ffmpeg' instead.'" + ) + + +# For backwards compatibility - we dont use this ourselves +def get_exe(): # pragma: no cover + """Wrapper for imageio_ffmpeg.get_ffmpeg_exe()""" + + return imageio_ffmpeg.get_ffmpeg_exe() + + +class FfmpegFormat(Format): + """Read/Write ImageResources using FFMPEG. + + See :mod:`imageio.plugins.ffmpeg` + """ + + def _can_read(self, request): + # Read from video stream? + # Note that we could write the _video flag here, but a user might + # select this format explicitly (and this code is not run) + if re.match(r"", request.filename): + return True + + # Read from file that we know? 
+ if request.extension in self.extensions: + return True + + def _can_write(self, request): + if request.extension in self.extensions: + return True + + # -- + + class Reader(Format.Reader): + _frame_catcher = None + _read_gen = None + + def _get_cam_inputname(self, index): + if sys.platform.startswith("linux"): + return "/dev/" + self.request._video[1:-1] + + elif sys.platform.startswith("win"): + # Ask ffmpeg for list of dshow device names + ffmpeg_api = imageio_ffmpeg + cmd = [ + ffmpeg_api.get_ffmpeg_exe(), + "-list_devices", + "true", + "-f", + CAM_FORMAT, + "-i", + "dummy", + ] + # Set `shell=True` in sp.run to prevent popup of a command + # line window in frozen applications. Note: this would be a + # security vulnerability if user-input goes into the cmd. + # Note that the ffmpeg process returns with exit code 1 when + # using `-list_devices` (or `-list_options`), even if the + # command is successful, so we set `check=False` explicitly. + completed_process = sp.run( + cmd, + stdout=sp.PIPE, + stderr=sp.PIPE, + encoding="utf-8", + shell=True, + check=False, + ) + + # Return device name at index + try: + name = parse_device_names(completed_process.stderr)[index] + except IndexError: + raise IndexError("No ffdshow camera at index %i." % index) + return "video=%s" % name + + elif sys.platform.startswith("darwin"): + # Appears that newer ffmpeg builds don't support -list-devices + # on OS X. But you can directly open the camera by index. + name = str(index) + return name + + else: # pragma: no cover + return "??" 
+ + def _open( + self, + loop=False, + size=None, + dtype=None, + pixelformat=None, + print_info=False, + ffmpeg_params=None, + input_params=None, + output_params=None, + fps=None, + ): + # Get generator functions + self._ffmpeg_api = imageio_ffmpeg + # Process input args + self._arg_loop = bool(loop) + if size is None: + self._arg_size = None + elif isinstance(size, tuple): + self._arg_size = "%ix%i" % size + elif isinstance(size, str) and "x" in size: + self._arg_size = size + else: + raise ValueError('FFMPEG size must be tuple of "NxM"') + if pixelformat is None: + pass + elif not isinstance(pixelformat, str): + raise ValueError("FFMPEG pixelformat must be str") + if dtype is None: + self._dtype = np.dtype("uint8") + else: + self._dtype = np.dtype(dtype) + allowed_dtypes = ["uint8", "uint16"] + if self._dtype.name not in allowed_dtypes: + raise ValueError( + "dtype must be one of: {}".format(", ".join(allowed_dtypes)) + ) + self._arg_pixelformat = pixelformat + self._arg_input_params = input_params or [] + self._arg_output_params = output_params or [] + self._arg_input_params += ffmpeg_params or [] # backward compat + # Write "_video"_arg - indicating webcam support + self.request._video = None + regex_match = re.match(r"", self.request.filename) + if regex_match: + self.request._video = self.request.filename + # Get local filename + if self.request._video: + index = int(regex_match.group(1)) + self._filename = self._get_cam_inputname(index) + else: + self._filename = self.request.get_local_filename() + # When passed to ffmpeg on command line, carets need to be escaped. 
+ self._filename = self._filename.replace("^", "^^") + # Determine pixel format and depth + self._depth = 3 + if self._dtype.name == "uint8": + self._pix_fmt = "rgb24" + self._bytes_per_channel = 1 + else: + self._pix_fmt = "rgb48le" + self._bytes_per_channel = 2 + # Initialize parameters + self._pos = -1 + self._meta = {"plugin": "ffmpeg"} + self._lastread = None + + # Calculating this from fps and duration is not accurate, + # and calculating it exactly with ffmpeg_api.count_frames_and_secs + # takes too long to do for each video. But we need it for looping. + self._nframes = float("inf") + if self._arg_loop and not self.request._video: + self._nframes = self.count_frames() + self._meta["nframes"] = self._nframes + + # Specify input framerate? (only on macOS) + # Ideally we'd get the supported framerate from the metadata, but we get the + # metadata when we boot ffmpeg ... maybe we could refactor this so we can + # get the metadata beforehand, but for now we'll just give it 2 tries on MacOS, + # one with fps 30 and one with fps 15. + need_input_fps = need_output_fps = False + if self.request._video and platform.system().lower() == "darwin": + if "-framerate" not in str(self._arg_input_params): + need_input_fps = True + if not self.request.kwargs.get("fps", None): + need_output_fps = True + if need_input_fps: + self._arg_input_params.extend(["-framerate", str(float(30))]) + if need_output_fps: + self._arg_output_params.extend(["-r", str(float(30))]) + + # Start ffmpeg subprocess and get meta information + try: + self._initialize() + except IndexError: + # Specify input framerate again, this time different. 
+ if need_input_fps: + self._arg_input_params[-1] = str(float(15)) + self._initialize() + else: + raise + + # For cameras, create thread that keeps reading the images + if self.request._video: + self._frame_catcher = FrameCatcher(self._read_gen) + + # For reference - but disabled, because it is inaccurate + # if self._meta["nframes"] == float("inf"): + # if self._meta.get("fps", 0) > 0: + # if self._meta.get("duration", 0) > 0: + # n = round(self._meta["duration"] * self._meta["fps"]) + # self._meta["nframes"] = int(n) + + def _close(self): + # First close the frame catcher, because we cannot close the gen + # if the frame catcher thread is using it + if self._frame_catcher is not None: + self._frame_catcher.stop_me() + self._frame_catcher = None + if self._read_gen is not None: + self._read_gen.close() + self._read_gen = None + + def count_frames(self): + """Count the number of frames. Note that this can take a few + seconds for large files. Also note that it counts the number + of frames in the original video and does not take a given fps + into account. + """ + # This would have been nice, but this does not work :( + # oargs = [] + # if self.request.kwargs.get("fps", None): + # fps = float(self.request.kwargs["fps"]) + # oargs += ["-r", "%.02f" % fps] + cf = self._ffmpeg_api.count_frames_and_secs + return cf(self._filename)[0] + + def _get_length(self): + return self._nframes # only not inf if loop is True + + def _get_data(self, index): + """Reads a frame at index. Note for coders: getting an + arbitrary frame in the video with ffmpeg can be painfully + slow if some decoding has to be done. 
This function tries + to avoid fectching arbitrary frames whenever possible, by + moving between adjacent frames.""" + # Modulo index (for looping) + if self._arg_loop and self._nframes < float("inf"): + index %= self._nframes + + if index == self._pos: + return self._lastread, dict(new=False) + elif index < 0: + raise IndexError("Frame index must be >= 0") + elif index >= self._nframes: + raise IndexError("Reached end of video") + else: + if (index < self._pos) or (index > self._pos + 100): + self._initialize(index) + else: + self._skip_frames(index - self._pos - 1) + result, is_new = self._read_frame() + self._pos = index + return result, dict(new=is_new) + + def _get_meta_data(self, index): + return self._meta + + def _initialize(self, index=0): + # Close the current generator, and thereby terminate its subprocess + if self._read_gen is not None: + self._read_gen.close() + + iargs = [] + oargs = [] + + # Create input args + iargs += self._arg_input_params + if self.request._video: + iargs += ["-f", CAM_FORMAT] + if self._arg_pixelformat: + iargs += ["-pix_fmt", self._arg_pixelformat] + if self._arg_size: + iargs += ["-s", self._arg_size] + elif index > 0: # re-initialize / seek + # Note: only works if we initialized earlier, and now have meta + # Some info here: https://trac.ffmpeg.org/wiki/Seeking + # There are two ways to seek, one before -i (input_params) and + # after (output_params). The former is fast, because it uses + # keyframes, the latter is slow but accurate. According to + # the article above, the fast method should also be accurate + # from ffmpeg version 2.1, however in version 4.1 our tests + # start failing again. Not sure why, but we can solve this + # by combining slow and fast. Seek the long stretch using + # the fast method, and seek the last 10s the slow way. 
+ starttime = index / self._meta["fps"] + seek_slow = min(10, starttime) + seek_fast = starttime - seek_slow + # We used to have this epsilon earlier, when we did not use + # the slow seek. I don't think we need it anymore. + # epsilon = -1 / self._meta["fps"] * 0.1 + iargs += ["-ss", "%.06f" % (seek_fast)] + oargs += ["-ss", "%.06f" % (seek_slow)] + + # Output args, for writing to pipe + if self._arg_size: + oargs += ["-s", self._arg_size] + if self.request.kwargs.get("fps", None): + fps = float(self.request.kwargs["fps"]) + oargs += ["-r", "%.02f" % fps] + oargs += self._arg_output_params + + # Get pixelformat and bytes per pixel + pix_fmt = self._pix_fmt + bpp = self._depth * self._bytes_per_channel + + # Create generator + rf = self._ffmpeg_api.read_frames + self._read_gen = rf( + self._filename, pix_fmt, bpp, input_params=iargs, output_params=oargs + ) + + # Read meta data. This start the generator (and ffmpeg subprocess) + if self.request._video: + # With cameras, catch error and turn into IndexError + try: + meta = self._read_gen.__next__() + except IOError as err: + err_text = str(err) + if "darwin" in sys.platform: + if "Unknown input format: 'avfoundation'" in err_text: + err_text += ( + "Try installing FFMPEG using " + "home brew to get a version with " + "support for cameras." 
+ ) + raise IndexError( + "No (working) camera at {}.\n\n{}".format( + self.request._video, err_text + ) + ) + else: + self._meta.update(meta) + elif index == 0: + self._meta.update(self._read_gen.__next__()) + else: + self._read_gen.__next__() # we already have meta data + + def _skip_frames(self, n=1): + """Reads and throws away n frames""" + for i in range(n): + self._read_gen.__next__() + self._pos += n + + def _read_frame(self): + # Read and convert to numpy array + w, h = self._meta["size"] + framesize = w * h * self._depth * self._bytes_per_channel + # t0 = time.time() + + # Read frame + if self._frame_catcher: # pragma: no cover - camera thing + s, is_new = self._frame_catcher.get_frame() + else: + s = self._read_gen.__next__() + is_new = True + + # Check + if len(s) != framesize: + raise RuntimeError( + "Frame is %i bytes, but expected %i." % (len(s), framesize) + ) + + result = np.frombuffer(s, dtype=self._dtype).copy() + result = result.reshape((h, w, self._depth)) + # t1 = time.time() + # print('etime', t1-t0) + + # Store and return + self._lastread = result + return result, is_new + + # -- + + class Writer(Format.Writer): + _write_gen = None + + def _open( + self, + fps=10, + codec="libx264", + bitrate=None, + pixelformat="yuv420p", + ffmpeg_params=None, + input_params=None, + output_params=None, + ffmpeg_log_level="quiet", + quality=5, + macro_block_size=16, + audio_path=None, + audio_codec=None, + ): + self._ffmpeg_api = imageio_ffmpeg + self._filename = self.request.get_local_filename() + self._pix_fmt = None + self._depth = None + self._size = None + + def _close(self): + if self._write_gen is not None: + self._write_gen.close() + self._write_gen = None + + def _append_data(self, im, meta): + # Get props of image + h, w = im.shape[:2] + size = w, h + depth = 1 if im.ndim == 2 else im.shape[2] + + # Ensure that image is in uint8 + im = image_as_uint(im, bitdepth=8) + # To be written efficiently, ie. 
without creating an immutable + # buffer, by calling im.tobytes() the array must be contiguous. + if not im.flags.c_contiguous: + # checkign the flag is a micro optimization. + # the image will be a numpy subclass. See discussion + # https://github.com/numpy/numpy/issues/11804 + im = np.ascontiguousarray(im) + + # Set size and initialize if not initialized yet + if self._size is None: + map = {1: "gray", 2: "gray8a", 3: "rgb24", 4: "rgba"} + self._pix_fmt = map.get(depth, None) + if self._pix_fmt is None: + raise ValueError("Image must have 1, 2, 3 or 4 channels") + self._size = size + self._depth = depth + self._initialize() + + # Check size of image + if size != self._size: + raise ValueError("All images in a movie should have same size") + if depth != self._depth: + raise ValueError( + "All images in a movie should have same " "number of channels" + ) + + assert self._write_gen is not None # Check status + + # Write. Yes, we can send the data in as a numpy array + self._write_gen.send(im) + + def set_meta_data(self, meta): + raise RuntimeError( + "The ffmpeg format does not support setting " "meta data." 
+ ) + + def _initialize(self): + # Close existing generator + if self._write_gen is not None: + self._write_gen.close() + + # Get parameters + # Use None to let imageio-ffmpeg (or ffmpeg) select good results + fps = self.request.kwargs.get("fps", 10) + codec = self.request.kwargs.get("codec", None) + bitrate = self.request.kwargs.get("bitrate", None) + quality = self.request.kwargs.get("quality", None) + input_params = self.request.kwargs.get("input_params") or [] + output_params = self.request.kwargs.get("output_params") or [] + output_params += self.request.kwargs.get("ffmpeg_params") or [] + pixelformat = self.request.kwargs.get("pixelformat", None) + macro_block_size = self.request.kwargs.get("macro_block_size", 16) + ffmpeg_log_level = self.request.kwargs.get("ffmpeg_log_level", None) + audio_path = self.request.kwargs.get("audio_path", None) + audio_codec = self.request.kwargs.get("audio_codec", None) + + macro_block_size = macro_block_size or 1 # None -> 1 + + # Create generator + self._write_gen = self._ffmpeg_api.write_frames( + self._filename, + self._size, + pix_fmt_in=self._pix_fmt, + pix_fmt_out=pixelformat, + fps=fps, + quality=quality, + bitrate=bitrate, + codec=codec, + macro_block_size=macro_block_size, + ffmpeg_log_level=ffmpeg_log_level, + input_params=input_params, + output_params=output_params, + audio_path=audio_path, + audio_codec=audio_codec, + ) + + # Seed the generator (this is where the ffmpeg subprocess starts) + self._write_gen.send(None) + + +class FrameCatcher(threading.Thread): + """Thread to keep reading the frame data from stdout. This is + useful when streaming from a webcam. Otherwise, if the user code + does not grab frames fast enough, the buffer will fill up, leading + to lag, and ffmpeg can also stall (experienced on Linux). The + get_frame() method always returns the last available image. 
+ """ + + def __init__(self, gen): + self._gen = gen + self._frame = None + self._frame_is_new = False + self._lock = threading.RLock() + threading.Thread.__init__(self) + self.daemon = True # do not let this thread hold up Python shutdown + self._should_stop = False + self.start() + + def stop_me(self): + self._should_stop = True + while self.is_alive(): + time.sleep(0.001) + + def get_frame(self): + while self._frame is None: # pragma: no cover - an init thing + time.sleep(0.001) + with self._lock: + is_new = self._frame_is_new + self._frame_is_new = False # reset + return self._frame, is_new + + def run(self): + # This runs in the worker thread + try: + while not self._should_stop: + time.sleep(0) # give control to other threads + frame = self._gen.__next__() + with self._lock: + self._frame = frame + self._frame_is_new = True + except (StopIteration, EOFError): + pass + + +def parse_device_names(ffmpeg_output): + """Parse the output of the ffmpeg -list-devices command""" + # Collect device names - get [friendly_name, alt_name] of each + device_names = [] + in_video_devices = False + for line in ffmpeg_output.splitlines(): + if line.startswith("[dshow"): + logger.debug(line) + line = line.split("]", 1)[1].strip() + if in_video_devices and line.startswith('"'): + friendly_name = line[1:-1] + device_names.append([friendly_name, ""]) + elif in_video_devices and line.lower().startswith("alternative name"): + alt_name = line.split(" name ", 1)[1].strip()[1:-1] + if sys.platform.startswith("win"): + alt_name = alt_name.replace("&", "^&") # Tested to work + else: + alt_name = alt_name.replace("&", "\\&") # Does this work? 
+ device_names[-1][-1] = alt_name + elif "video devices" in line: + in_video_devices = True + elif "devices" in line: + # set False for subsequent "devices" sections + in_video_devices = False + # Post-process, see #441 + # prefer friendly names, use alt name if two cams have same friendly name + device_names2 = [] + for friendly_name, alt_name in device_names: + if friendly_name not in device_names2: + device_names2.append(friendly_name) + elif alt_name: + device_names2.append(alt_name) + else: + device_names2.append(friendly_name) # duplicate, but not much we can do + return device_names2 diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/fits.py b/parrot/lib/python3.10/site-packages/imageio/plugins/fits.py new file mode 100644 index 0000000000000000000000000000000000000000..4617d1ea8c0cc2ce351151e700ff14651102685d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/fits.py @@ -0,0 +1,126 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +"""Read FITS files. + +Backend Library: `Astropy `_ + +.. note:: + To use this plugin you have to install its backend:: + + pip install imageio[fits] + +Flexible Image Transport System (FITS) is an open standard defining a +digital file format useful for storage, transmission and processing of +scientific and other images. FITS is the most commonly used digital +file format in astronomy. + + +Parameters +---------- +cache : bool + If the file name is a URL, `~astropy.utils.data.download_file` is used + to open the file. This specifies whether or not to save the file + locally in Astropy's download cache (default: `True`). +uint : bool + Interpret signed integer data where ``BZERO`` is the + central value and ``BSCALE == 1`` as unsigned integer + data. For example, ``int16`` data with ``BZERO = 32768`` + and ``BSCALE = 1`` would be treated as ``uint16`` data. + + Note, for backward compatibility, the kwarg **uint16** may + be used instead. 
The kwarg was renamed when support was + added for integers of any size. +ignore_missing_end : bool + Do not issue an exception when opening a file that is + missing an ``END`` card in the last header. +checksum : bool or str + If `True`, verifies that both ``DATASUM`` and + ``CHECKSUM`` card values (when present in the HDU header) + match the header and data of all HDU's in the file. Updates to a + file that already has a checksum will preserve and update the + existing checksums unless this argument is given a value of + 'remove', in which case the CHECKSUM and DATASUM values are not + checked, and are removed when saving changes to the file. +disable_image_compression : bool, optional + If `True`, treats compressed image HDU's like normal + binary table HDU's. +do_not_scale_image_data : bool + If `True`, image data is not scaled using BSCALE/BZERO values + when read. +ignore_blank : bool + If `True`, the BLANK keyword is ignored if present. +scale_back : bool + If `True`, when saving changes to a file that contained scaled + image data, restore the data to the original type and reapply the + original BSCALE/BZERO values. This could lead to loss of accuracy + if scaling back to integer values after performing floating point + operations on the data. + +""" + +from ..core import Format + +_fits = None # lazily loaded + + +def load_lib(): + global _fits + try: + from astropy.io import fits as _fits + except ImportError: + raise ImportError( + "The FITS format relies on the astropy package." + "Please refer to http://www.astropy.org/ " + "for further instructions." + ) + return _fits + + +class FitsFormat(Format): + """See :mod:`imageio.plugins.fits`""" + + def _can_read(self, request): + # We return True if ext matches, because this is the only plugin + # that can. If astropy is not installed, a useful error follows. 
+ return request.extension in self.extensions + + def _can_write(self, request): + # No write support + return False + + # -- reader + + class Reader(Format.Reader): + def _open(self, cache=False, **kwargs): + if not _fits: + load_lib() + hdulist = _fits.open(self.request.get_file(), cache=cache, **kwargs) + + self._index = [] + allowed_hdu_types = (_fits.ImageHDU, _fits.PrimaryHDU, _fits.CompImageHDU) + for n, hdu in zip(range(len(hdulist)), hdulist): + if isinstance(hdu, allowed_hdu_types): + # Ignore (primary) header units with no data (use '.size' + # rather than '.data' to avoid actually loading the image): + if hdu.size > 0: + self._index.append(n) + self._hdulist = hdulist + + def _close(self): + self._hdulist.close() + + def _get_length(self): + return len(self._index) + + def _get_data(self, index): + # Get data + if index < 0 or index >= len(self._index): + raise IndexError("Index out of range while reading from fits") + im = self._hdulist[self._index[index]].data + # Return array and empty meta data + return im, {} + + def _get_meta_data(self, index): + # Get the meta data for the given index + raise RuntimeError("The fits format does not support meta data.") diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/freeimage.py b/parrot/lib/python3.10/site-packages/imageio/plugins/freeimage.py new file mode 100644 index 0000000000000000000000000000000000000000..922899f88f1b68c5d2be8c4ff215383590fa7709 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/freeimage.py @@ -0,0 +1,404 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +"""Read/Write images using FreeImage. + +Backend Library: `FreeImage `_ + +.. note:: + To use this plugin you have to install its backend:: + + imageio_download_bin freeimage + + or you can download the backend using the function:: + + imageio.plugins.freeimage.download() + +Each Freeimage format has the ``flags`` keyword argument. 
See the `Freeimage +documentation `_ for more information. + +Parameters +---------- +flags : int + A freeimage-specific option. In most cases we provide explicit + parameters for influencing image reading. + +""" + +import numpy as np + +from ..core import Format, image_as_uint +from ..core.request import RETURN_BYTES +from ._freeimage import FNAME_PER_PLATFORM, IO_FLAGS, download, fi # noqa + +# todo: support files with only meta data + + +class FreeimageFormat(Format): + """See :mod:`imageio.plugins.freeimage`""" + + _modes = "i" + + def __init__(self, name, description, extensions=None, modes=None, *, fif=None): + super().__init__(name, description, extensions=extensions, modes=modes) + self._fif = fif + + @property + def fif(self): + return self._fif # Set when format is created + + def _can_read(self, request): + # Ask freeimage if it can read it, maybe ext missing + if fi.has_lib(): + if not hasattr(request, "_fif"): + try: + request._fif = fi.getFIF(request.filename, "r", request.firstbytes) + except Exception: # pragma: no cover + request._fif = -1 + if request._fif == self.fif: + return True + elif request._fif == 7 and self.fif == 14: + # PPM gets identified as PBM and PPM can read PBM + # see: https://github.com/imageio/imageio/issues/677 + return True + + def _can_write(self, request): + # Ask freeimage, because we are not aware of all formats + if fi.has_lib(): + if not hasattr(request, "_fif"): + try: + request._fif = fi.getFIF(request.filename, "w") + except ValueError: # pragma: no cover + if request.raw_uri == RETURN_BYTES: + request._fif = self.fif + else: + request._fif = -1 + if request._fif is self.fif: + return True + + # -- + + class Reader(Format.Reader): + def _get_length(self): + return 1 + + def _open(self, flags=0): + self._bm = fi.create_bitmap(self.request.filename, self.format.fif, flags) + self._bm.load_from_filename(self.request.get_local_filename()) + + def _close(self): + self._bm.close() + + def _get_data(self, index): + if 
index != 0: + raise IndexError("This format only supports singleton images.") + return self._bm.get_image_data(), self._bm.get_meta_data() + + def _get_meta_data(self, index): + if not (index is None or index == 0): + raise IndexError() + return self._bm.get_meta_data() + + # -- + + class Writer(Format.Writer): + def _open(self, flags=0): + self._flags = flags # Store flags for later use + self._bm = None + self._is_set = False # To prevent appending more than one image + self._meta = {} + + def _close(self): + # Set global meta data + self._bm.set_meta_data(self._meta) + # Write and close + self._bm.save_to_filename(self.request.get_local_filename()) + self._bm.close() + + def _append_data(self, im, meta): + # Check if set + if not self._is_set: + self._is_set = True + else: + raise RuntimeError( + "Singleton image; " "can only append image data once." + ) + # Pop unit dimension for grayscale images + if im.ndim == 3 and im.shape[-1] == 1: + im = im[:, :, 0] + # Lazy instantaion of the bitmap, we need image data + if self._bm is None: + self._bm = fi.create_bitmap( + self.request.filename, self.format.fif, self._flags + ) + self._bm.allocate(im) + # Set data + self._bm.set_image_data(im) + # There is no distinction between global and per-image meta data + # for singleton images + self._meta = meta + + def _set_meta_data(self, meta): + self._meta = meta + + +# Special plugins + +# todo: there is also FIF_LOAD_NOPIXELS, +# but perhaps that should be used with get_meta_data. + + +class FreeimageBmpFormat(FreeimageFormat): + """A BMP format based on the Freeimage library. + + This format supports grayscale, RGB and RGBA images. + + The freeimage plugin requires a `freeimage` binary. 
If this binary + not available on the system, it can be downloaded manually from + by either + + - the command line script ``imageio_download_bin freeimage`` + - the Python method ``imageio.plugins.freeimage.download()`` + + Parameters for saving + --------------------- + compression : bool + Whether to compress the bitmap using RLE when saving. Default False. + It seems this does not always work, but who cares, you should use + PNG anyway. + + """ + + class Writer(FreeimageFormat.Writer): + def _open(self, flags=0, compression=False): + # Build flags from kwargs + flags = int(flags) + if compression: + flags |= IO_FLAGS.BMP_SAVE_RLE + else: + flags |= IO_FLAGS.BMP_DEFAULT + # Act as usual, but with modified flags + return FreeimageFormat.Writer._open(self, flags) + + def _append_data(self, im, meta): + im = image_as_uint(im, bitdepth=8) + return FreeimageFormat.Writer._append_data(self, im, meta) + + +class FreeimagePngFormat(FreeimageFormat): + """A PNG format based on the Freeimage library. + + This format supports grayscale, RGB and RGBA images. + + The freeimage plugin requires a `freeimage` binary. If this binary + not available on the system, it can be downloaded manually from + by either + + - the command line script ``imageio_download_bin freeimage`` + - the Python method ``imageio.plugins.freeimage.download()`` + + Parameters for reading + ---------------------- + ignoregamma : bool + Avoid gamma correction. Default True. + + Parameters for saving + --------------------- + compression : {0, 1, 6, 9} + The compression factor. Higher factors result in more + compression at the cost of speed. Note that PNG compression is + always lossless. Default 9. + quantize : int + If specified, turn the given RGB or RGBA image in a paletted image + for more efficient storage. The value should be between 2 and 256. + If the value of 0 the image is not quantized. + interlaced : bool + Save using Adam7 interlacing. Default False. 
+ """ + + class Reader(FreeimageFormat.Reader): + def _open(self, flags=0, ignoregamma=True): + # Build flags from kwargs + flags = int(flags) + if ignoregamma: + flags |= IO_FLAGS.PNG_IGNOREGAMMA + # Enter as usual, with modified flags + return FreeimageFormat.Reader._open(self, flags) + + # -- + + class Writer(FreeimageFormat.Writer): + def _open(self, flags=0, compression=9, quantize=0, interlaced=False): + compression_map = { + 0: IO_FLAGS.PNG_Z_NO_COMPRESSION, + 1: IO_FLAGS.PNG_Z_BEST_SPEED, + 6: IO_FLAGS.PNG_Z_DEFAULT_COMPRESSION, + 9: IO_FLAGS.PNG_Z_BEST_COMPRESSION, + } + # Build flags from kwargs + flags = int(flags) + if interlaced: + flags |= IO_FLAGS.PNG_INTERLACED + try: + flags |= compression_map[compression] + except KeyError: + raise ValueError("Png compression must be 0, 1, 6, or 9.") + # Act as usual, but with modified flags + return FreeimageFormat.Writer._open(self, flags) + + def _append_data(self, im, meta): + if str(im.dtype) == "uint16": + im = image_as_uint(im, bitdepth=16) + else: + im = image_as_uint(im, bitdepth=8) + FreeimageFormat.Writer._append_data(self, im, meta) + # Quantize? + q = int(self.request.kwargs.get("quantize", False)) + if not q: + pass + elif not (im.ndim == 3 and im.shape[-1] == 3): + raise ValueError("Can only quantize RGB images") + elif q < 2 or q > 256: + raise ValueError("PNG quantize param must be 2..256") + else: + bm = self._bm.quantize(0, q) + self._bm.close() + self._bm = bm + + +class FreeimageJpegFormat(FreeimageFormat): + """A JPEG format based on the Freeimage library. + + This format supports grayscale and RGB images. + + The freeimage plugin requires a `freeimage` binary. 
If this binary + not available on the system, it can be downloaded manually from + by either + + - the command line script ``imageio_download_bin freeimage`` + - the Python method ``imageio.plugins.freeimage.download()`` + + Parameters for reading + ---------------------- + exifrotate : bool + Automatically rotate the image according to the exif flag. + Default True. If 2 is given, do the rotation in Python instead + of freeimage. + quickread : bool + Read the image more quickly, at the expense of quality. + Default False. + + Parameters for saving + --------------------- + quality : scalar + The compression factor of the saved image (1..100), higher + numbers result in higher quality but larger file size. Default 75. + progressive : bool + Save as a progressive JPEG file (e.g. for images on the web). + Default False. + optimize : bool + On saving, compute optimal Huffman coding tables (can reduce a + few percent of file size). Default False. + baseline : bool + Save basic JPEG, without metadata or any markers. Default False. + + """ + + class Reader(FreeimageFormat.Reader): + def _open(self, flags=0, exifrotate=True, quickread=False): + # Build flags from kwargs + flags = int(flags) + if exifrotate and exifrotate != 2: + flags |= IO_FLAGS.JPEG_EXIFROTATE + if not quickread: + flags |= IO_FLAGS.JPEG_ACCURATE + # Enter as usual, with modified flags + return FreeimageFormat.Reader._open(self, flags) + + def _get_data(self, index): + im, meta = FreeimageFormat.Reader._get_data(self, index) + im = self._rotate(im, meta) + return im, meta + + def _rotate(self, im, meta): + """Use Orientation information from EXIF meta data to + orient the image correctly. Freeimage is also supposed to + support that, and I am pretty sure it once did, but now it + does not, so let's just do it in Python. + Edit: and now it works again, just leave in place as a fallback. 
+ """ + if self.request.kwargs.get("exifrotate", None) == 2: + try: + ori = meta["EXIF_MAIN"]["Orientation"] + except KeyError: # pragma: no cover + pass # Orientation not available + else: # pragma: no cover - we cannot touch all cases + # www.impulseadventure.com/photo/exif-orientation.html + if ori in [1, 2]: + pass + if ori in [3, 4]: + im = np.rot90(im, 2) + if ori in [5, 6]: + im = np.rot90(im, 3) + if ori in [7, 8]: + im = np.rot90(im) + if ori in [2, 4, 5, 7]: # Flipped cases (rare) + im = np.fliplr(im) + return im + + # -- + + class Writer(FreeimageFormat.Writer): + def _open( + self, flags=0, quality=75, progressive=False, optimize=False, baseline=False + ): + # Test quality + quality = int(quality) + if quality < 1 or quality > 100: + raise ValueError("JPEG quality should be between 1 and 100.") + # Build flags from kwargs + flags = int(flags) + flags |= quality + if progressive: + flags |= IO_FLAGS.JPEG_PROGRESSIVE + if optimize: + flags |= IO_FLAGS.JPEG_OPTIMIZE + if baseline: + flags |= IO_FLAGS.JPEG_BASELINE + # Act as usual, but with modified flags + return FreeimageFormat.Writer._open(self, flags) + + def _append_data(self, im, meta): + if im.ndim == 3 and im.shape[-1] == 4: + raise IOError("JPEG does not support alpha channel.") + im = image_as_uint(im, bitdepth=8) + return FreeimageFormat.Writer._append_data(self, im, meta) + + +class FreeimagePnmFormat(FreeimageFormat): + """A PNM format based on the Freeimage library. + + This format supports single bit (PBM), grayscale (PGM) and RGB (PPM) + images, even with ASCII or binary coding. + + The freeimage plugin requires a `freeimage` binary. If this binary + not available on the system, it can be downloaded manually from + by either + + - the command line script ``imageio_download_bin freeimage`` + - the Python method ``imageio.plugins.freeimage.download()`` + + Parameters for saving + --------------------- + use_ascii : bool + Save with ASCII coding. Default True. 
+ """ + + class Writer(FreeimageFormat.Writer): + def _open(self, flags=0, use_ascii=True): + # Build flags from kwargs + flags = int(flags) + if use_ascii: + flags |= IO_FLAGS.PNM_SAVE_ASCII + # Act as usual, but with modified flags + return FreeimageFormat.Writer._open(self, flags) diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/freeimagemulti.py b/parrot/lib/python3.10/site-packages/imageio/plugins/freeimagemulti.py new file mode 100644 index 0000000000000000000000000000000000000000..bad53d40f9cdd979304982c4b2430d53b4872b27 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/freeimagemulti.py @@ -0,0 +1,316 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +"""Plugin for multi-image freeimafe formats, like animated GIF and ico. +""" + +import logging +import numpy as np + +from ..core import Format, image_as_uint +from ._freeimage import fi, IO_FLAGS +from .freeimage import FreeimageFormat + +logger = logging.getLogger(__name__) + + +class FreeimageMulti(FreeimageFormat): + """Base class for freeimage formats that support multiple images.""" + + _modes = "iI" + _fif = -1 + + class Reader(Format.Reader): + def _open(self, flags=0): + flags = int(flags) + # Create bitmap + self._bm = fi.create_multipage_bitmap( + self.request.filename, self.format.fif, flags + ) + self._bm.load_from_filename(self.request.get_local_filename()) + + def _close(self): + self._bm.close() + + def _get_length(self): + return len(self._bm) + + def _get_data(self, index): + sub = self._bm.get_page(index) + try: + return sub.get_image_data(), sub.get_meta_data() + finally: + sub.close() + + def _get_meta_data(self, index): + index = index or 0 + if index < 0 or index >= len(self._bm): + raise IndexError() + sub = self._bm.get_page(index) + try: + return sub.get_meta_data() + finally: + sub.close() + + # -- + + class Writer(FreeimageFormat.Writer): + def _open(self, flags=0): + # Set flags + self._flags = 
flags = int(flags) + # Instantiate multi-page bitmap + self._bm = fi.create_multipage_bitmap( + self.request.filename, self.format.fif, flags + ) + self._bm.save_to_filename(self.request.get_local_filename()) + + def _close(self): + # Close bitmap + self._bm.close() + + def _append_data(self, im, meta): + # Prepare data + if im.ndim == 3 and im.shape[-1] == 1: + im = im[:, :, 0] + im = image_as_uint(im, bitdepth=8) + # Create sub bitmap + sub1 = fi.create_bitmap(self._bm._filename, self.format.fif) + # Let subclass add data to bitmap, optionally return new + sub2 = self._append_bitmap(im, meta, sub1) + # Add + self._bm.append_bitmap(sub2) + sub2.close() + if sub1 is not sub2: + sub1.close() + + def _append_bitmap(self, im, meta, bitmap): + # Set data + bitmap.allocate(im) + bitmap.set_image_data(im) + bitmap.set_meta_data(meta) + # Return that same bitmap + return bitmap + + def _set_meta_data(self, meta): + pass # ignore global meta data + + +class MngFormat(FreeimageMulti): + """An Mng format based on the Freeimage library. + + Read only. Seems broken. + """ + + _fif = 6 + + def _can_write(self, request): # pragma: no cover + return False + + +class IcoFormat(FreeimageMulti): + """An ICO format based on the Freeimage library. + + This format supports grayscale, RGB and RGBA images. + + The freeimage plugin requires a `freeimage` binary. If this binary + is not available on the system, it can be downloaded by either + + - the command line script ``imageio_download_bin freeimage`` + - the Python method ``imageio.plugins.freeimage.download()`` + + Parameters for reading + ---------------------- + makealpha : bool + Convert to 32-bit and create an alpha channel from the AND- + mask when loading. Default False. Note that this returns wrong + results if the image was already RGBA. 
+ + """ + + _fif = 1 + + class Reader(FreeimageMulti.Reader): + def _open(self, flags=0, makealpha=False): + # Build flags from kwargs + flags = int(flags) + if makealpha: + flags |= IO_FLAGS.ICO_MAKEALPHA + return FreeimageMulti.Reader._open(self, flags) + + +class GifFormat(FreeimageMulti): + """A format for reading and writing static and animated GIF, based + on the Freeimage library. + + Images read with this format are always RGBA. Currently, + the alpha channel is ignored when saving RGB images with this + format. + + The freeimage plugin requires a `freeimage` binary. If this binary + is not available on the system, it can be downloaded by either + + - the command line script ``imageio_download_bin freeimage`` + - the Python method ``imageio.plugins.freeimage.download()`` + + Parameters for reading + ---------------------- + playback : bool + 'Play' the GIF to generate each frame (as 32bpp) instead of + returning raw frame data when loading. Default True. + + Parameters for saving + --------------------- + loop : int + The number of iterations. Default 0 (meaning loop indefinitely) + duration : {float, list} + The duration (in seconds) of each frame. Either specify one value + that is used for all frames, or one value for each frame. + Note that in the GIF format the duration/delay is expressed in + hundredths of a second, which limits the precision of the duration. + fps : float + The number of frames per second. If duration is not given, the + duration for each frame is set to 1/fps. Default 10. + palettesize : int + The number of colors to quantize the image to. Is rounded to + the nearest power of two. Default 256. + quantizer : {'wu', 'nq'} + The quantization algorithm: + * wu - Wu, Xiaolin, Efficient Statistical Computations for + Optimal Color Quantization + * nq (neuqant) - Dekker A. 
def _open(
    self,
    flags=0,
    loop=0,
    duration=None,
    fps=10,
    palettesize=256,
    quantizer="Wu",
    subrectangles=False,
):
    """Configure GIF writing.

    Parameters
    ----------
    loop : int
        Number of iterations (0 = loop forever).
    duration : float or list of float
        Seconds per frame; one value for all frames or one per frame.
    fps : float
        Used to derive the frame duration when ``duration`` is None.
    palettesize : int
        Colors to quantize to; rounded up to a power of two in 2..256.
    quantizer : {'wu', 'nq'}
        Quantization algorithm.
    subrectangles : bool
        Store only the changed rectangle of each frame.
    """
    # Check palettesize
    if palettesize < 2 or palettesize > 256:
        raise ValueError("GIF quantize param must be 2..256")
    if palettesize not in [2, 4, 8, 16, 32, 64, 128, 256]:
        # Round *palettesize* itself up to the next power of two.
        # (Bug fix: this previously computed np.log2(128), a constant,
        # so any non-power-of-two request silently became 128.)
        palettesize = 2 ** int(np.log2(palettesize) + 0.999)
        logger.warning(
            "Warning: palettesize (%r) modified to a factor of "
            "two between 2-256." % palettesize
        )
    self._palettesize = palettesize
    # Check quantizer; str() makes a non-string arg raise the intended
    # ValueError instead of an AttributeError on .lower().
    self._quantizer = {"wu": 0, "nq": 1}.get(str(quantizer).lower(), None)
    if self._quantizer is None:
        raise ValueError('Invalid quantizer, must be "wu" or "nq".')
    # Check frametime (stored in milliseconds per frame)
    if duration is None:
        self._frametime = [int(1000 / float(fps) + 0.5)]
    elif isinstance(duration, (list, tuple)):
        # Accept tuples as well as lists of per-frame durations.
        self._frametime = [int(1000 * d) for d in duration]
    elif isinstance(duration, (float, int)):
        self._frametime = [int(1000 * duration)]
    else:
        raise ValueError("Invalid value for duration: %r" % duration)
    # Check subrectangles
    self._subrectangles = bool(subrectangles)
    self._prev_im = None
    # Init
    FreeimageMulti.Writer._open(self, flags)
    # Set global meta data (applied to the first frame)
    self._meta = {}
    self._meta["ANIMATION"] = {
        "Loop": np.array([loop]).astype(np.uint32),
    }
sub1.set_image_data(im) + # Quantize it if its RGB + if im.ndim == 3 and im.shape[-1] == 3: + sub2 = sub1.quantize(self._quantizer, self._palettesize) + # Set meta data and return + sub2.set_meta_data(meta) + return sub2 + + def _get_sub_rectangles(self, prev, im): + """ + Calculate the minimal rectangles that need updating each frame. + Returns a two-element tuple containing the cropped images and a + list of x-y positions. + """ + # Get difference, sum over colors + diff = np.abs(im - prev) + if diff.ndim == 3: + diff = diff.sum(2) + # Get begin and end for both dimensions + X = np.argwhere(diff.sum(0)) + Y = np.argwhere(diff.sum(1)) + # Get rect coordinates + if X.size and Y.size: + x0, x1 = int(X[0]), int(X[-1]) + 1 + y0, y1 = int(Y[0]), int(Y[-1]) + 1 + else: # No change ... make it minimal + x0, x1 = 0, 2 + y0, y1 = 0, 2 + # Cut out and return + return im[y0:y1, x0:x1], (x0, y0) diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/gdal.py b/parrot/lib/python3.10/site-packages/imageio/plugins/gdal.py new file mode 100644 index 0000000000000000000000000000000000000000..04cabb7e3ea71605328e4f7c915ea947ec37afbd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/gdal.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Read GDAL files. + +Backend: `GDAL `_ + +.. note:: + To use this plugin you have to install its backend:: + + pip install imageio[gdal] + +Parameters +---------- +none +""" + +from ..core import Format, has_module + +_gdal = None # lazily loaded in load_lib() + + +def load_lib(): + global _gdal + try: + import osgeo.gdal as _gdal + except ImportError: + raise ImportError( + "The GDAL format relies on the GDAL package." + "Please refer to http://www.gdal.org/" + "for further instructions." 
+ ) + return _gdal + + +GDAL_FORMATS = (".tiff", " .tif", ".img", ".ecw", ".jpg", ".jpeg") + + +class GdalFormat(Format): + """See :mod:`imageio.plugins.gdal`""" + + def _can_read(self, request): + if request.extension in (".ecw",): + return True + if has_module("osgeo.gdal"): + return request.extension in self.extensions + + def _can_write(self, request): + return False + + # -- + + class Reader(Format.Reader): + def _open(self): + if not _gdal: + load_lib() + self._ds = _gdal.Open(self.request.get_local_filename()) + + def _close(self): + del self._ds + + def _get_length(self): + return 1 + + def _get_data(self, index): + if index != 0: + raise IndexError("Gdal file contains only one dataset") + return self._ds.ReadAsArray(), self._get_meta_data(index) + + def _get_meta_data(self, index): + return self._ds.GetMetadata() diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/grab.py b/parrot/lib/python3.10/site-packages/imageio/plugins/grab.py new file mode 100644 index 0000000000000000000000000000000000000000..8477863e30757740e83f55d880f2a7554dbe1521 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/grab.py @@ -0,0 +1,105 @@ +""" +PIL-based formats to take screenshots and grab from the clipboard. 
class ClipboardGrabFormat(BaseGrabFormat):
    """The ClipboardGrabFormat provided a means to grab image data from
    the clipboard, using the uri ""

    This functionality is provided via Pillow. Note that "" is
    only supported on Windows.

    Parameters for reading
    ----------------------
    No parameters.
    """

    def _can_read(self, request):
        # Only service the dedicated clipboard uri, and only when
        # Pillow's ImageGrab module can actually be imported.
        if request.filename != "":
            return False
        return bool(self._init_pillow())

    def _get_data(self, index):
        grabber = self._init_pillow()
        assert grabber

        pil_im = grabber.grabclipboard()
        if pil_im is None:
            raise RuntimeError(
                "There seems to be no image data on the clipboard now."
            )
        return np.asarray(pil_im), {}
Lytro F01 light field camera sensor +LYTRO_ILLUM_IMAGE_SIZE = (5368, 7728) +LYTRO_F01_IMAGE_SIZE = (3280, 3280) + +# Parameter of lfr file format +HEADER_LENGTH = 12 +SIZE_LENGTH = 4 # = 16 - header_length +SHA1_LENGTH = 45 # = len("sha1-") + (160 / 4) +PADDING_LENGTH = 35 # = (4*16) - header_length - size_length - sha1_length +DATA_CHUNKS_ILLUM = 11 +DATA_CHUNKS_F01 = 3 + + +class LytroFormat(Format): + """Base class for Lytro format. + The subclasses LytroLfrFormat, LytroLfpFormat, LytroIllumRawFormat and + LytroF01RawFormat implement the Lytro-LFR, Lytro-LFP and Lytro-RAW format + for the Illum and original F01 camera respectively. + Writing is not supported. + """ + + # Only single images are supported. + _modes = "i" + + def _can_write(self, request): + # Writing of Lytro files is not supported + return False + + # -- writer + + class Writer(Format.Writer): + def _open(self, flags=0): + self._fp = self.request.get_file() + + def _close(self): + # Close the reader. + # Note that the request object will close self._fp + pass + + def _append_data(self, im, meta): + # Process the given data and meta data. + raise RuntimeError("The lytro format cannot write image data.") + + def _set_meta_data(self, meta): + # Process the given meta data (global for all images) + # It is not mandatory to support this. + raise RuntimeError("The lytro format cannot write meta data.") + + +class LytroIllumRawFormat(LytroFormat): + """This is the Lytro Illum RAW format. + The raw format is a 10bit image format as used by the Lytro Illum + light field camera. The format will read the specified raw file and will + try to load a .txt or .json file with the associated meta data. + This format does not support writing. + + + Parameters for reading + ---------------------- + meta_only : bool + Whether to only read the metadata. 
class Reader(Format.Reader):
    def _open(self, meta_only=False):
        """Open the raw file; with meta_only=True no pixel data is decoded."""
        self._file = self.request.get_file()
        self._data = None  # lazily-read raw byte buffer
        self._meta_only = meta_only

    def _close(self):
        # The request object will close self._file; just drop the buffer.
        del self._data

    def _get_length(self):
        # A raw file always holds exactly one image.
        return 1

    def _get_data(self, index):
        """Return (image, meta) for the single dataset in the file."""
        # Bug fix: compare against None, not the string "None" (the
        # sibling _get_meta_data already used [0, None]); previously an
        # explicit index=None raised IndexError incorrectly.
        if index not in [0, None]:
            raise IndexError("Lytro file contains only one dataset")

        if not self._meta_only:
            # Read all bytes lazily, once.
            if self._data is None:
                self._data = self._file.read()
            # Read bytes from string and convert to uint16
            raw = np.frombuffer(self._data, dtype=np.uint8).astype(np.uint16)
            # Rearrange bits
            img = LytroIllumRawFormat.rearrange_bits(raw)
        else:
            # Return empty image
            img = np.array([])

        return img, self._get_meta_data(index=0)

    def _get_meta_data(self, index):
        """Load metadata from a sibling .txt/.json file, if present.

        Later extensions in the search order override earlier matches
        (so .JSON wins over .txt), preserving the original behavior.
        """
        if index not in [0, None]:
            raise IndexError("Lytro meta data file contains only one dataset")

        filename_base = os.path.splitext(self.request.get_local_filename())[0]

        meta_data = None
        for ext in [".txt", ".TXT", ".json", ".JSON"]:
            candidate = filename_base + ext
            if os.path.isfile(candidate):
                # Bug fix: close the handle (was a bare json.load(open(...))).
                with open(candidate) as fh:
                    meta_data = json.load(fh)

        if meta_data is not None:
            return meta_data

        logger.warning("No metadata file found for provided raw file.")
        return {}
+ """ + + def _can_read(self, request): + # Check if mode and extensions are supported by the format + if request.extension in (".lfr",): + return True + + # -- reader + + class Reader(Format.Reader): + def _open(self, meta_only=False, include_thumbnail=True): + self._file = self.request.get_file() + self._data = None + self._chunks = {} + self.metadata = {} + self._content = None + self._meta_only = meta_only + self._include_thumbnail = include_thumbnail + + self._find_header() + self._find_chunks() + self._find_meta() + + try: + # Get sha1 dict and check if it is in dictionary of data chunks + chunk_dict = self._content["frames"][0]["frame"] + if ( + chunk_dict["metadataRef"] in self._chunks + and chunk_dict["imageRef"] in self._chunks + and chunk_dict["privateMetadataRef"] in self._chunks + ): + if not self._meta_only: + # Read raw image data byte buffer + data_pos, size = self._chunks[chunk_dict["imageRef"]] + self._file.seek(data_pos, 0) + self.raw_image_data = self._file.read(size) + + # Read meta data + data_pos, size = self._chunks[chunk_dict["metadataRef"]] + self._file.seek(data_pos, 0) + metadata = self._file.read(size) + # Add metadata to meta data dict + self.metadata["metadata"] = json.loads(metadata.decode("ASCII")) + + # Read private metadata + data_pos, size = self._chunks[chunk_dict["privateMetadataRef"]] + self._file.seek(data_pos, 0) + serial_numbers = self._file.read(size) + self.serial_numbers = json.loads(serial_numbers.decode("ASCII")) + # Add private metadata to meta data dict + self.metadata["privateMetadata"] = self.serial_numbers + + # Read image preview thumbnail + if self._include_thumbnail: + chunk_dict = self._content["thumbnails"][0] + if chunk_dict["imageRef"] in self._chunks: + # Read thumbnail image from thumbnail chunk + data_pos, size = self._chunks[chunk_dict["imageRef"]] + self._file.seek(data_pos, 0) + # Read binary data, read image as jpeg + thumbnail_data = self._file.read(size) + thumbnail_img = imread(thumbnail_data, 
def _get_chunk(self, header):
    """Validate the chunk *header*, then locate the chunk payload.

    Parameters
    ----------
    header : bytes
        Byte string that identifies start of chunk.

    Returns
    -------
    data_pos : int or None
        Start position of the data chunk in the file (None if empty).
    size : int
        Size of the data chunk.
    sha1 : str or None
        Sha1 value of the chunk (None if empty).

    The file position is left just past the chunk and any trailing
    NUL padding.
    """
    # Read and check header of chunk
    if self._file.read(HEADER_LENGTH) != header:
        raise RuntimeError("The LFR chunk header is invalid.")

    data_pos = sha1 = None

    # Chunk payload size (big-endian signed int)
    (size,) = struct.unpack(">i", self._file.read(SIZE_LENGTH))
    if size > 0:
        # Read sha1 identifier of the following data chunk
        sha1 = self._file.read(SHA1_LENGTH).decode("ASCII")
        # Skip fixed null chars
        self._file.read(PADDING_LENGTH)
        # Remember where the data starts, then jump past it
        data_pos = self._file.tell()
        self._file.seek(size, 1)
        # Consume any trailing NUL bytes, then rewind one character
        while self._file.read(1) == b"\0":
            pass
        self._file.seek(-1, 1)

    return data_pos, size, sha1
class Reader(Format.Reader):
    def _open(self, meta_only=False):
        """Open the F01 raw file; with meta_only=True no pixels are decoded."""
        self._file = self.request.get_file()
        self._data = None  # lazily-read raw byte buffer
        self._meta_only = meta_only

    def _close(self):
        # The request object will close self._file; just drop the buffer.
        del self._data

    def _get_length(self):
        # A raw file always holds exactly one image.
        return 1

    def _get_data(self, index):
        """Return (image, meta) for the single dataset in the file."""
        # Bug fix: compare against None, not the string "None" (matches
        # _get_meta_data); an explicit index=None raised incorrectly.
        if index not in [0, None]:
            raise IndexError("Lytro file contains only one dataset")

        if not self._meta_only:
            # Read all bytes lazily, once.
            if self._data is None:
                self._data = self._file.read()
            # Read bytes from string and convert to uint16
            raw = np.frombuffer(self._data, dtype=np.uint8).astype(np.uint16)
            # Rearrange bits
            img = LytroF01RawFormat.rearrange_bits(raw)
        else:
            img = np.array([])

        return img, self._get_meta_data(index=0)

    def _get_meta_data(self, index):
        """Load metadata from a sibling .txt/.json file, if present.

        Later extensions in the search order override earlier matches,
        preserving the original behavior.
        """
        if index not in [0, None]:
            raise IndexError("Lytro meta data file contains only one dataset")

        filename_base = os.path.splitext(self.request.get_local_filename())[0]

        meta_data = None
        for ext in [".txt", ".TXT", ".json", ".JSON"]:
            candidate = filename_base + ext
            if os.path.isfile(candidate):
                # Bug fix: close the handle (was a bare json.load(open(...))).
                with open(candidate) as fh:
                    meta_data = json.load(fh)

        if meta_data is not None:
            return meta_data

        logger.warning("No metadata file found for provided raw file.")
        return {}
+ """ + + def _can_read(self, request): + # Check if mode and extensions are supported by the format + if request.extension in (".lfp",): + return True + + # -- reader + + class Reader(Format.Reader): + def _open(self, meta_only=False): + self._file = self.request.get_file() + self._data = None + self._chunks = {} + self.metadata = {} + self._content = None + self._meta_only = meta_only + + self._find_header() + self._find_meta() + self._find_chunks() + + try: + # Get sha1 dict and check if it is in dictionary of data chunks + chunk_dict = self._content["picture"]["frameArray"][0]["frame"] + if ( + chunk_dict["metadataRef"] in self._chunks + and chunk_dict["imageRef"] in self._chunks + and chunk_dict["privateMetadataRef"] in self._chunks + ): + if not self._meta_only: + # Read raw image data byte buffer + data_pos, size = self._chunks[chunk_dict["imageRef"]] + self._file.seek(data_pos, 0) + self.raw_image_data = self._file.read(size) + + # Read meta data + data_pos, size = self._chunks[chunk_dict["metadataRef"]] + self._file.seek(data_pos, 0) + metadata = self._file.read(size) + # Add metadata to meta data dict + self.metadata["metadata"] = json.loads(metadata.decode("ASCII")) + + # Read private metadata + data_pos, size = self._chunks[chunk_dict["privateMetadataRef"]] + self._file.seek(data_pos, 0) + serial_numbers = self._file.read(size) + self.serial_numbers = json.loads(serial_numbers.decode("ASCII")) + # Add private metadata to meta data dict + self.metadata["privateMetadata"] = self.serial_numbers + + except KeyError: + raise RuntimeError("The specified file is not a valid LFP file.") + + def _close(self): + # Close the reader. + # Note that the request object will close self._file + del self._data + + def _get_length(self): + # Return the number of images. Can be np.inf + return 1 + + def _find_header(self): + """ + Checks if file has correct header and skip it. 
+ """ + file_header = b"\x89LFP\x0D\x0A\x1A\x0A\x00\x00\x00\x01" + + # Read and check header of file + header = self._file.read(HEADER_LENGTH) + if header != file_header: + raise RuntimeError("The LFP file header is invalid.") + + # Read first bytes to skip header + self._file.read(SIZE_LENGTH) + + def _find_chunks(self): + """ + Gets start position and size of data chunks in file. + """ + chunk_header = b"\x89LFC\x0D\x0A\x1A\x0A\x00\x00\x00\x00" + + for i in range(0, DATA_CHUNKS_F01): + data_pos, size, sha1 = self._get_chunk(chunk_header) + self._chunks[sha1] = (data_pos, size) + + def _find_meta(self): + """ + Gets a data chunk that contains information over content + of other data chunks. + """ + meta_header = b"\x89LFM\x0D\x0A\x1A\x0A\x00\x00\x00\x00" + + data_pos, size, sha1 = self._get_chunk(meta_header) + + # Get content + self._file.seek(data_pos, 0) + data = self._file.read(size) + self._content = json.loads(data.decode("ASCII")) + data = self._file.read(5) # Skip 5 + + def _get_chunk(self, header): + """ + Checks if chunk has correct header and skips it. + Finds start position and length of next chunk and reads + sha1-string that identifies the following data chunk. + + Parameters + ---------- + header : bytes + Byte string that identifies start of chunk. + + Returns + ------- + data_pos : int + Start position of data chunk in file. + size : int + Size of data chunk. + sha1 : str + Sha1 value of chunk. 
+ """ + # Read and check header of chunk + header_chunk = self._file.read(HEADER_LENGTH) + if header_chunk != header: + raise RuntimeError("The LFP chunk header is invalid.") + + data_pos = None + sha1 = None + + # Read size + size = struct.unpack(">i", self._file.read(SIZE_LENGTH))[0] + if size > 0: + # Read sha1 + sha1 = str(self._file.read(SHA1_LENGTH).decode("ASCII")) + # Skip fixed null chars + self._file.read(PADDING_LENGTH) + # Find start of data and skip data + data_pos = self._file.tell() + self._file.seek(size, 1) + # Skip extra null chars + ch = self._file.read(1) + while ch == b"\0": + ch = self._file.read(1) + self._file.seek(-1, 1) + + return data_pos, size, sha1 + + def _get_data(self, index): + # Return the data and meta data for the given index + if index not in [0, None]: + raise IndexError("Lytro lfp file contains only one dataset") + + if not self._meta_only: + # Read bytes from string and convert to uint16 + raw = np.frombuffer(self.raw_image_data, dtype=np.uint8).astype( + np.uint16 + ) + im = LytroF01RawFormat.rearrange_bits(raw) + else: + im = np.array([]) + + # Return array and dummy meta data + return im, self.metadata + + def _get_meta_data(self, index): + # Get the meta data for the given index. If index is None, + # it returns the global meta data. + if index not in [0, None]: + raise IndexError("Lytro meta data file contains only one dataset") + + return self.metadata diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/npz.py b/parrot/lib/python3.10/site-packages/imageio/plugins/npz.py new file mode 100644 index 0000000000000000000000000000000000000000..87b37e44a0cc85671f42d1e25c775b687c709f71 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/npz.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +"""Read/Write NPZ files. + +Backend: `Numpy `_ + +NPZ is a file format by numpy that provides storage of array data using gzip +compression. 
import numpy as np

from ..core import Format


class NpzFormat(Format):
    """See :mod:`imageio.plugins.npz`

    Stores one or more arrays in numpy's compressed ``.npz`` container.
    The format cannot stream: everything is read/written at once, and
    there is no support for meta data.
    """

    def _can_read(self, request):
        # We support any kind of image data
        return request.extension in self.extensions

    def _can_write(self, request):
        # We support any kind of image data
        return request.extension in self.extensions

    # -- reader

    class Reader(Format.Reader):
        def _open(self):
            # Load npz file, which provides another file like object
            self._npz = np.load(self.request.get_file())
            assert isinstance(self._npz, np.lib.npyio.NpzFile)

            # Order names by their trailing "_<suffix>" part. np.savez*
            # names positional arrays "arr_0", "arr_1", ..., so numeric
            # suffixes must compare numerically -- a plain string sort
            # would order "arr_10" before "arr_2" and scramble files with
            # more than ten images. Non-numeric suffixes keep the original
            # lexicographic ordering (sorted after numeric ones).
            def _suffix_key(name):
                suffix = name.split("_")[-1]
                if suffix.isdigit():
                    return (0, int(suffix), name)
                return (1, suffix, name)

            self._names = sorted(self._npz.files, key=_suffix_key)

        def _close(self):
            self._npz.close()

        def _get_length(self):
            # Number of arrays stored in the container.
            return len(self._names)

        def _get_data(self, index):
            # Return the array stored under the index-th name.
            if index < 0 or index >= len(self._names):
                # BUGFIX: message previously said "nzp".
                raise IndexError("Index out of range while reading from npz")
            im = self._npz[self._names[index]]
            # Return array and empty meta data
            return im, {}

        def _get_meta_data(self, index):
            # npz has no notion of meta data; fail loudly instead of
            # returning something misleading.
            raise RuntimeError("The npz format does not support meta data.")

    # -- writer

    class Writer(Format.Writer):
        def _open(self):
            # Npz is not such a great format. We cannot stream to the file.
            # So we remember all images and write them to file at the end.
            self._images = []

        def _close(self):
            # Write everything in one go.
            np.savez_compressed(self.request.get_file(), *self._images)

        def _append_data(self, im, meta):
            self._images.append(im)  # discard meta data

        def set_meta_data(self, meta):
            raise RuntimeError("The npz format does not support meta data.")
import warnings
from pathlib import Path
from typing import Any, Dict, List, Optional, Union

import cv2
import numpy as np

from ..core import Request
from ..core.request import URI_BYTES, InitializationError, IOMode
from ..core.v3_plugin_api import ImageProperties, PluginV3
from ..typing import ArrayLike


class OpenCVPlugin(PluginV3):
    """Read/Write images using OpenCV (``cv2``)."""

    def __init__(self, request: Request) -> None:
        """Instantiate the plugin for the given resource.

        Parameters
        ----------
        request : Request
            A request object representing the resource to be operated on.

        Raises
        ------
        InitializationError
            If OpenCV has no codec to read/write the requested resource.
        """
        super().__init__(request)

        # OpenCV operates on local file names, so materialize the resource.
        self.file_handle = request.get_local_filename()
        if request._uri_type is URI_BYTES:
            # No meaningful display name when reading from raw bytes.
            self.filename = ""
        else:
            self.filename = request.raw_uri

        # Reject resources OpenCV has no codec for, up front.
        mode = request.mode.io_mode
        if mode == IOMode.read and not cv2.haveImageReader(self.file_handle):
            raise InitializationError(f"OpenCV can't read `{self.filename}`.")
        elif mode == IOMode.write and not cv2.haveImageWriter(self.file_handle):
            raise InitializationError(f"OpenCV can't write to `{self.filename}`.")

    def read(
        self,
        *,
        index: Optional[int] = None,
        colorspace: Union[int, str, None] = None,
        flags: int = cv2.IMREAD_COLOR,
    ) -> np.ndarray:
        """Read an image from the ImageResource.

        Parameters
        ----------
        index : int, Ellipsis
            If int, read the index-th image from the ImageResource. If ``...``,
            read all images from the ImageResource and stack them along a new,
            prepended, batch dimension. If None (default), use ``index=0`` if
            the image contains exactly one image and ``index=...`` otherwise.
        colorspace : str, int
            The colorspace to convert into after loading and before returning
            the image. If None (default) keep grayscale images as is, convert
            images with an alpha channel to ``RGBA`` and all other images to
            ``RGB``. If int, interpret ``colorspace`` as one of OpenCV's
            ``cv2.COLOR_*`` conversion flags and use it for conversion. If
            str, convert the image into the given colorspace. Possible string
            values are: ``"RGB"``, ``"BGR"``, ``"RGBA"``, ``"BGRA"``,
            ``"GRAY"``, ``"HSV"``, or ``"LAB"``.
        flags : int
            The OpenCV flag(s) (``cv2.IMREAD_*``) to pass to the reader.
            Refer to the OpenCV docs for details.

        Returns
        -------
        ndimage : np.ndarray
            The decoded image as a numpy array.
        """

        if index is None:
            # Auto-detect: single image -> index 0, multi-image -> batch.
            n_images = cv2.imcount(self.file_handle, flags)
            index = 0 if n_images == 1 else ...

        if index is ...:
            retval, img = cv2.imreadmulti(self.file_handle, flags=flags)
            is_batch = True
        else:
            # Read exactly one frame starting at `index`.
            retval, img = cv2.imreadmulti(self.file_handle, index, 1, flags=flags)
            is_batch = False

        if retval is False:
            raise ValueError(f"Could not read index `{index}` from `{self.filename}`.")

        # OpenCV decodes channel-last BGR(A); derive the source colorspace
        # from the decoded array and pick the default target accordingly.
        if img[0].ndim == 2:
            in_colorspace = "GRAY"
            out_colorspace = colorspace or "GRAY"
        elif img[0].shape[-1] == 4:
            in_colorspace = "BGRA"
            out_colorspace = colorspace or "RGBA"
        else:
            in_colorspace = "BGR"
            out_colorspace = colorspace or "RGB"

        if isinstance(colorspace, int):
            # Caller supplied a raw cv2.COLOR_* conversion code.
            cvt_space = colorspace
        elif in_colorspace == out_colorspace.upper():
            cvt_space = None  # already in the requested space
        else:
            out_colorspace = out_colorspace.upper()
            cvt_space = getattr(cv2, f"COLOR_{in_colorspace}2{out_colorspace}")

        if cvt_space is not None:
            img = np.stack([cv2.cvtColor(x, cvt_space) for x in img])
        else:
            img = np.stack(img)

        return img if is_batch else img[0]

    def iter(
        self,
        colorspace: Union[int, str, None] = None,
        flags: int = cv2.IMREAD_COLOR,
    ) -> np.ndarray:
        """Yield images from the ImageResource.

        Parameters
        ----------
        colorspace : str, int
            The colorspace to convert into after loading and before returning
            the image. Same semantics as in :meth:`read`.
        flags : int
            The OpenCV flag(s) (``cv2.IMREAD_*``) to pass to the reader.

        Yields
        ------
        ndimage : np.ndarray
            The decoded image as a numpy array.
        """
        # NOTE(review): `imcount` is called without `flags` here, unlike in
        # `read`/`properties` -- confirm whether the count can differ.
        for idx in range(cv2.imcount(self.file_handle)):
            yield self.read(index=idx, flags=flags, colorspace=colorspace)

    def write(
        self,
        ndimage: Union[ArrayLike, List[ArrayLike]],
        is_batch: bool = False,
        params: Optional[List[int]] = None,
    ) -> Optional[bytes]:
        """Save an ndimage in the ImageResource.

        Parameters
        ----------
        ndimage : ArrayLike, List[ArrayLike]
            The image data that will be written to the file. It is either a
            single image, a batch of images, or a list of images.
        is_batch : bool
            If True, the provided ndimage is a batch of images. If False
            (default), the provided ndimage is a single image. If the provided
            ndimage is a list of images, this parameter has no effect.
        params : List[int]
            A list of parameters that will be passed to OpenCV's imwrite or
            imwritemulti functions (``cv2.IMWRITE_*`` key/value pairs).

        Returns
        -------
        encoded_image : bytes, None
            If the ImageResource is ``"<bytes>"`` the call to write returns the
            encoded image as a bytes string. Otherwise it returns None.
        """

        if isinstance(ndimage, list):
            ndimage = np.stack(ndimage, axis=0)
        elif not is_batch:
            # Promote a single image to a one-element batch.
            ndimage = ndimage[None, ...]

        if ndimage[0].ndim == 2:
            n_channels = 1
        else:
            n_channels = ndimage[0].shape[-1]

        # Input is assumed RGB(A); OpenCV writes BGR(A), so convert.
        if n_channels == 1:
            ndimage_cv2 = [x for x in ndimage]
        elif n_channels == 4:
            ndimage_cv2 = [cv2.cvtColor(x, cv2.COLOR_RGBA2BGRA) for x in ndimage]
        else:
            ndimage_cv2 = [cv2.cvtColor(x, cv2.COLOR_RGB2BGR) for x in ndimage]

        retval = cv2.imwritemulti(self.file_handle, ndimage_cv2, params)

        if retval is False:
            # not sure what scenario would trigger this, but
            # it can occur theoretically.
            raise IOError("OpenCV failed to write.")  # pragma: no cover

        if self.request._uri_type == URI_BYTES:
            # The request wrote into a temp file; hand its bytes back.
            return Path(self.file_handle).read_bytes()

    def properties(
        self,
        index: Optional[int] = None,
        colorspace: Union[int, str, None] = None,
        flags: int = cv2.IMREAD_COLOR,
    ) -> ImageProperties:
        """Standardized image metadata.

        Parameters
        ----------
        index : int, Ellipsis
            If int, get the properties of the index-th image in the
            ImageResource. If ``...``, get the properties of the image stack
            that contains all images. If None (default), use ``index=0`` if the
            image contains exactly one image and ``index=...`` otherwise.
        colorspace : str, int
            The colorspace to convert into before inspecting the image. Same
            semantics as in :meth:`read`.
        flags : int
            The OpenCV flag(s) (``cv2.IMREAD_*``) to pass to the reader.

        Returns
        -------
        props : ImageProperties
            A dataclass filled with standardized image metadata.

        Notes
        -----
        Reading properties with OpenCV involves decoding pixel data, because
        OpenCV doesn't provide a direct way to access metadata.
        """

        if index is None:
            n_images = cv2.imcount(self.file_handle, flags)
            is_batch = n_images > 1
        elif index is Ellipsis:
            n_images = cv2.imcount(self.file_handle, flags)
            is_batch = True
        else:
            is_batch = False

        # unfortunately, OpenCV doesn't allow reading shape without reading pixel data
        if is_batch:
            # Decode the first frame only and extrapolate the stack shape.
            img = self.read(index=0, flags=flags, colorspace=colorspace)
            return ImageProperties(
                shape=(n_images, *img.shape),
                dtype=img.dtype,
                n_images=n_images,
                is_batch=True,
            )

        img = self.read(index=index, flags=flags, colorspace=colorspace)
        return ImageProperties(shape=img.shape, dtype=img.dtype, is_batch=False)

    def metadata(
        self, index: Optional[int] = None, exclude_applied: bool = True
    ) -> Dict[str, Any]:
        """Format-specific metadata.

        .. warning::
            OpenCV does not support reading metadata. When called, this
            function emits a ``UserWarning`` and returns an empty dict.

        Parameters
        ----------
        index : int
            This parameter has no effect.
        exclude_applied : bool
            This parameter has no effect.
        """

        warnings.warn("OpenCV does not support reading metadata.", UserWarning)
        return dict()
import sys
import warnings
from io import BytesIO
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union, cast

import numpy as np
from PIL import ExifTags, GifImagePlugin, Image, ImageSequence, UnidentifiedImageError
from PIL import __version__ as pil_version  # type: ignore

from ..core.request import URI_BYTES, InitializationError, IOMode, Request
from ..core.v3_plugin_api import ImageProperties, PluginV3
from ..typing import ArrayLike


def pillow_version() -> Tuple[int, ...]:
    """Return the installed Pillow version as a tuple of ints."""
    return tuple(int(x) for x in pil_version.split("."))


def _exif_orientation_transform(orientation: int, mode: str) -> Callable:
    # get transformation that transforms an image from a
    # given EXIF orientation into the standard orientation

    # -1 if the mode has color channel, 0 otherwise
    axis = -2 if Image.getmodebands(mode) > 1 else -1

    EXIF_ORIENTATION = {
        1: lambda x: x,
        2: lambda x: np.flip(x, axis=axis),
        3: lambda x: np.rot90(x, k=2),
        4: lambda x: np.flip(x, axis=axis - 1),
        5: lambda x: np.flip(np.rot90(x, k=3), axis=axis),
        6: lambda x: np.rot90(x, k=3),
        7: lambda x: np.flip(np.rot90(x, k=1), axis=axis),
        8: lambda x: np.rot90(x, k=1),
    }

    return EXIF_ORIENTATION[orientation]


class PillowPlugin(PluginV3):
    def __init__(self, request: Request) -> None:
        """Instantiate a new Pillow Plugin Object

        Parameters
        ----------
        request : {Request}
            A request object representing the resource to be operated on.

        Raises
        ------
        InitializationError
            If Pillow can not identify the resource (read) or has no writer
            registered for its extension (write).
        """

        super().__init__(request)

        # Register HEIF opener for Pillow (best-effort; pillow_heif optional)
        try:
            from pillow_heif import register_heif_opener
        except ImportError:
            pass
        else:
            register_heif_opener()

        # Register AVIF opener for Pillow (best-effort; pillow_heif optional)
        try:
            from pillow_heif import register_avif_opener
        except ImportError:
            pass
        else:
            register_avif_opener()

        # Lazily populated Pillow image handle (read mode only).
        self._image: Optional[Image.Image] = None
        # Frames buffered between write() calls, flushed on close().
        self.images_to_write = []

        if request.mode.io_mode == IOMode.read:
            try:
                with Image.open(request.get_file()):
                    # Check if it is generally possible to read the image.
                    # This will not read any data and merely try to find a
                    # compatible pillow plugin (ref: the pillow docs).
                    pass
            except UnidentifiedImageError:
                if request._uri_type == URI_BYTES:
                    raise InitializationError(
                        "Pillow can not read the provided bytes."
                    ) from None
                else:
                    raise InitializationError(
                        f"Pillow can not read {request.raw_uri}."
                    ) from None

            self._image = Image.open(self._request.get_file())
        else:
            self.save_args = {}

            extension = self.request.extension or self.request.format_hint
            if extension is None:
                warnings.warn(
                    "Can't determine file format to write as. You _must_"
                    " set `format` during write or the call will fail. Use "
                    "`extension` to supress this warning. ",
                    UserWarning,
                )
                return

            # Make sure Pillow's plugin registry is populated before
            # checking whether the extension is writable.
            tirage = [Image.preinit, Image.init]
            for format_loader in tirage:
                format_loader()
                if extension in Image.registered_extensions().keys():
                    return

            raise InitializationError(
                f"Pillow can not write `{extension}` files."
            ) from None

    def close(self) -> None:
        # Flush any frames still buffered from write() calls.
        self._flush_writer()

        if self._image:
            self._image.close()

        self._request.finish()

    def read(
        self,
        *,
        index: Optional[int] = None,
        mode: Optional[str] = None,
        rotate: bool = False,
        apply_gamma: bool = False,
        writeable_output: bool = True,
        pilmode: Optional[str] = None,
        exifrotate: Optional[bool] = None,
        as_gray: Optional[bool] = None,
    ) -> np.ndarray:
        """
        Parses the given URI and creates a ndarray from it.

        Parameters
        ----------
        index : int
            If the ImageResource contains multiple ndimages, and index is an
            integer, select the index-th ndimage from among them and return it.
            If index is an ellipsis (...), read all ndimages in the file and
            stack them along a new batch dimension and return them. If index is
            None, this plugin reads the first image of the file (index=0) unless
            the image is a GIF or APNG, in which case all images are read
            (index=...).
        mode : str
            Convert the image to the given mode before returning it. If None,
            the mode will be left unchanged. Possible modes can be found at:
            https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
        rotate : bool
            If True and the image contains an EXIF orientation tag,
            apply the orientation before returning the ndimage.
        apply_gamma : bool
            If True and the image contains metadata about gamma, apply gamma
            correction to the image.
        writeable_output : bool
            If True, ensure that the image is writable before returning it to
            the user. This incurs a full copy of the pixel data if the data
            served by pillow is read-only. Consequentially, setting this flag to
            False improves performance for some images.
        pilmode : str
            Deprecated, use `mode` instead.
        exifrotate : bool
            Deprecated, use `rotate` instead.
        as_gray : bool
            Deprecated. Exists to raise a constructive error message.

        Returns
        -------
        ndimage : ndarray
            A numpy array containing the loaded image data

        Notes
        -----
        If you read a paletted image (e.g. GIF) then the plugin will apply the
        palette by default. Should you wish to read the palette indices of each
        pixel use ``mode="P"``. The corresponding color palette can be found in
        the image's metadata using the ``palette`` key when metadata is
        extracted using the ``exclude_applied=False`` kwarg. The latter is
        needed, as palettes are applied by default and hence excluded by default
        to keep metadata and pixel data consistent.
        """

        if pilmode is not None:
            warnings.warn(
                "`pilmode` is deprecated. Use `mode` instead.", DeprecationWarning
            )
            mode = pilmode

        if exifrotate is not None:
            warnings.warn(
                "`exifrotate` is deprecated. Use `rotate` instead.", DeprecationWarning
            )
            rotate = exifrotate

        if as_gray is not None:
            raise TypeError(
                "The keyword `as_gray` is no longer supported."
                "Use `mode='F'` for a backward-compatible result, or "
                " `mode='L'` for an integer-valued result."
            )

        if self._image.format == "GIF":
            # Converting GIF P frames to RGB
            # https://github.com/python-pillow/Pillow/pull/6150
            # NOTE: this mutates module-global Pillow state for all users.
            GifImagePlugin.LOADING_STRATEGY = (
                GifImagePlugin.LoadingStrategy.RGB_AFTER_DIFFERENT_PALETTE_ONLY
            )

        if index is None:
            # GIF and APNG default to reading the full animation.
            if self._image.format == "GIF":
                index = Ellipsis
            elif self._image.custom_mimetype == "image/apng":
                index = Ellipsis
            else:
                index = 0

        if isinstance(index, int):
            # will raise IO error if index >= number of frames in image
            self._image.seek(index)
            image = self._apply_transforms(
                self._image, mode, rotate, apply_gamma, writeable_output
            )
        else:
            # index is ... -> stack every frame along a new batch axis.
            iterator = self.iter(
                mode=mode,
                rotate=rotate,
                apply_gamma=apply_gamma,
                writeable_output=writeable_output,
            )
            image = np.stack([im for im in iterator], axis=0)

        return image

    def iter(
        self,
        *,
        mode: Optional[str] = None,
        rotate: bool = False,
        apply_gamma: bool = False,
        writeable_output: bool = True,
    ) -> Iterator[np.ndarray]:
        """
        Iterate over all ndimages/frames in the URI

        Parameters
        ----------
        mode : {str, None}
            Convert the image to the given mode before returning it. If None,
            the mode will be left unchanged. Possible modes can be found at:
            https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
        rotate : {bool}
            If set to ``True`` and the image contains an EXIF orientation tag,
            apply the orientation before returning the ndimage.
        apply_gamma : {bool}
            If ``True`` and the image contains metadata about gamma, apply gamma
            correction to the image.
        writeable_output : bool
            If True, ensure that the image is writable before returning it to
            the user. This incurs a full copy of the pixel data if the data
            served by pillow is read-only. Consequentially, setting this flag to
            False improves performance for some images.
        """

        for im in ImageSequence.Iterator(self._image):
            yield self._apply_transforms(
                im, mode, rotate, apply_gamma, writeable_output
            )

    def _apply_transforms(
        self, image, mode, rotate, apply_gamma, writeable_output
    ) -> np.ndarray:
        # Convert one PIL frame to ndarray, applying mode conversion,
        # EXIF rotation, and gamma correction as requested.
        if mode is not None:
            image = image.convert(mode)
        elif image.mode == "P":
            # adjust for pillow9 changes
            # see: https://github.com/python-pillow/Pillow/issues/5929
            image = image.convert(image.palette.mode)
        elif image.format == "PNG" and image.mode == "I":
            # 16-bit grayscale PNGs decode to mode "I" (int32); recover the
            # native 16-bit representation where the Pillow version allows.
            major, minor, patch = pillow_version()

            if sys.byteorder == "little":
                desired_mode = "I;16"
            else:  # pragma: no cover
                # can't test big-endian in GH-Actions
                desired_mode = "I;16B"

            if major < 10:  # pragma: no cover
                warnings.warn(
                    "Loading 16-bit (uint16) PNG as int32 due to limitations "
                    "in pillow's PNG decoder. This will be fixed in a future "
                    "version of pillow which will make this warning dissapear.",
                    UserWarning,
                )
            elif minor < 1:  # pragma: no cover
                # pillow 10.0.x: reinterpret the raw mode in place
                # (no conversion path available yet).
                image.mode = desired_mode
            else:
                # pillow >= 10.1.0
                image = image.convert(desired_mode)

        image = np.asarray(image)

        meta = self.metadata(index=self._image.tell(), exclude_applied=False)
        if rotate and "Orientation" in meta:
            transformation = _exif_orientation_transform(
                meta["Orientation"], self._image.mode
            )
            image = transformation(image)

        if apply_gamma and "gamma" in meta:
            gamma = float(meta["gamma"])
            scale = float(65536 if image.dtype == np.uint16 else 255)
            gain = 1.0
            # +0.4999 implements round-half-up before the cast below.
            image = ((image / scale) ** gamma) * scale * gain + 0.4999
            # NOTE(review): always casts to uint8, even for uint16 input
            # (scale 65536) -- looks lossy; confirm intended.
            image = np.round(image).astype(np.uint8)

        if writeable_output and not image.flags["WRITEABLE"]:
            image = np.array(image)

        return image

    def write(
        self,
        ndimage: Union[ArrayLike, List[ArrayLike]],
        *,
        mode: Optional[str] = None,
        format: Optional[str] = None,
        is_batch: Optional[bool] = None,
        **kwargs,
    ) -> Optional[bytes]:
        """
        Write an ndimage to the URI specified in path.

        If the URI points to a file on the current host and the file does not
        yet exist it will be created. If the file exists already, it will be
        appended if possible; otherwise, it will be replaced.

        If necessary, the image is broken down along the leading dimension to
        fit into individual frames of the chosen format. If the format doesn't
        support multiple frames, and IOError is raised.

        Parameters
        ----------
        ndimage : ndarray or list
            The ndimage to write. If a list is given each element is expected to
            be an ndimage.
        mode : str
            Specify the image's color format. If None (default), the mode is
            inferred from the array's shape and dtype. Possible modes can be
            found at:
            https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
        format : str
            Optional format override. If omitted, the format to use is
            determined from the filename extension. If a file object was used
            instead of a filename, this parameter must always be used.
        is_batch : bool
            Explicitly tell the writer that ``ndimage`` is a batch of images
            (True) or not (False). If None, the writer will guess this from the
            provided ``mode`` or ``ndimage.shape``. While the latter often works,
            it may cause problems for small images due to aliasing of spatial
            and color-channel axes.
        kwargs : ...
            Extra arguments to pass to pillow. If a writer doesn't recognise an
            option, it is silently ignored. The available options are described
            in pillow's image format documentation for each writer.

        Notes
        -----
        When writing batches of very narrow (2-4 pixels wide) gray images set
        the ``mode`` explicitly to avoid the batch being identified as a colored
        image.
        """
        if "fps" in kwargs:
            warnings.warn(
                "The keyword `fps` is no longer supported. Use `duration`"
                "(in ms) instead, e.g. `fps=50` == `duration=20` (1000 * 1/50).",
                DeprecationWarning,
            )
            # NOTE(review): "fps" is left in kwargs and forwarded to pillow
            # via save_args.update(kwargs) below -- confirm intended.
            kwargs["duration"] = 1000 * 1 / kwargs.get("fps")

        if isinstance(ndimage, list):
            ndimage = np.stack(ndimage, axis=0)
            is_batch = True
        else:
            ndimage = np.asarray(ndimage)

        # check if ndimage is a batch of frames/pages (e.g. for writing GIF)
        # if mode is given, use it; otherwise fall back to image.ndim only
        if is_batch is not None:
            pass
        elif mode is not None:
            is_batch = (
                ndimage.ndim > 3 if Image.getmodebands(mode) > 1 else ndimage.ndim > 2
            )
        elif ndimage.ndim == 2:
            is_batch = False
        elif ndimage.ndim == 3 and ndimage.shape[-1] == 1:
            raise ValueError("Can't write images with one color channel.")
        elif ndimage.ndim == 3 and ndimage.shape[-1] in [2, 3, 4]:
            # Note: this makes a channel-last assumption
            is_batch = False
        else:
            is_batch = True

        if not is_batch:
            ndimage = ndimage[None, ...]

        # Buffer frames; actual writing happens in _flush_writer().
        for frame in ndimage:
            pil_frame = Image.fromarray(frame, mode=mode)
            if "bits" in kwargs:
                pil_frame = pil_frame.quantize(colors=2 ** kwargs["bits"])
            self.images_to_write.append(pil_frame)

        if (
            format is not None
            and "format" in self.save_args
            and self.save_args["format"] != format
        ):
            old_format = self.save_args["format"]
            warnings.warn(
                "Changing the output format during incremental"
                " writes is strongly discouraged."
                f" Was `{old_format}`, is now `{format}`.",
                UserWarning,
            )

        extension = self.request.extension or self.request.format_hint
        self.save_args["format"] = format or Image.registered_extensions()[extension]
        self.save_args.update(kwargs)

        # when writing to `bytes` we flush instantly
        result = None
        if self._request._uri_type == URI_BYTES:
            self._flush_writer()
            file = cast(BytesIO, self._request.get_file())
            result = file.getvalue()

        return result

    def _flush_writer(self):
        # Write all buffered frames in one save() call; multi-frame saves
        # use the first frame as primary and append the rest.
        if len(self.images_to_write) == 0:
            return

        primary_image = self.images_to_write.pop(0)

        if len(self.images_to_write) > 0:
            self.save_args["save_all"] = True
            self.save_args["append_images"] = self.images_to_write

        primary_image.save(self._request.get_file(), **self.save_args)
        self.images_to_write.clear()
        self.save_args.clear()

    def get_meta(self, *, index=0) -> Dict[str, Any]:
        # Legacy v2-style accessor; delegates to metadata().
        return self.metadata(index=index, exclude_applied=False)

    def metadata(
        self, index: Optional[int] = None, exclude_applied: bool = True
    ) -> Dict[str, Any]:
        """Read ndimage metadata.

        Parameters
        ----------
        index : {integer, None}
            If the ImageResource contains multiple ndimages, and index is an
            integer, select the index-th ndimage from among them and return its
            metadata. If index is an ellipsis (...), read and return global
            metadata. If index is None, this plugin reads metadata from the
            first image of the file (index=0) unless the image is a GIF or APNG,
            in which case global metadata is read (index=...).
        exclude_applied : bool
            If True, exclude metadata fields that are applied to the image while
            reading. For example, if the binary data contains a rotation flag,
            the image is rotated by default and the rotation flag is excluded
            from the metadata to avoid confusion.

        Returns
        -------
        metadata : dict
            A dictionary of format-specific metadata.
        """

        if index is None:
            if self._image.format == "GIF":
                index = Ellipsis
            elif self._image.custom_mimetype == "image/apng":
                index = Ellipsis
            else:
                index = 0

        if isinstance(index, int) and self._image.tell() != index:
            self._image.seek(index)

        metadata = self._image.info.copy()
        metadata["mode"] = self._image.mode
        # NOTE: PIL's Image.size is (width, height).
        metadata["shape"] = self._image.size

        if self._image.mode == "P" and not exclude_applied:
            metadata["palette"] = np.asarray(tuple(self._image.palette.colors.keys()))

        if self._image.getexif():
            # Translate numeric EXIF tags to names; drop unknown tags.
            exif_data = {
                ExifTags.TAGS.get(key, "unknown"): value
                for key, value in dict(self._image.getexif()).items()
            }
            exif_data.pop("unknown", None)
            metadata.update(exif_data)

        if exclude_applied:
            metadata.pop("Orientation", None)

        return metadata

    def properties(self, index: Optional[int] = None) -> ImageProperties:
        """Standardized ndimage metadata

        Parameters
        ----------
        index : int
            If the ImageResource contains multiple ndimages, and index is an
            integer, select the index-th ndimage from among them and return its
            properties. If index is an ellipsis (...), read and return the
            properties of all ndimages in the file stacked along a new batch
            dimension. If index is None, this plugin reads and returns the
            properties of the first image (index=0) unless the image is a GIF or
            APNG, in which case it reads and returns the properties all images
            (index=...).

        Returns
        -------
        properties : ImageProperties
            A dataclass filled with standardized image metadata.

        Notes
        -----
        This does not decode pixel data and is fast for large images.
        """

        if index is None:
            if self._image.format == "GIF":
                index = Ellipsis
            elif self._image.custom_mimetype == "image/apng":
                index = Ellipsis
            else:
                index = 0

        if index is Ellipsis:
            self._image.seek(0)
        else:
            self._image.seek(index)

        if self._image.mode == "P":
            # mode of palette images is determined by their palette
            mode = self._image.palette.mode
        else:
            mode = self._image.mode

        width: int = self._image.width
        height: int = self._image.height
        shape: Tuple[int, ...] = (height, width)

        n_frames: Optional[int] = None
        if index is ...:
            n_frames = getattr(self._image, "n_frames", 1)
            shape = (n_frames, *shape)

        # Probe the channel layout via a 1x1 dummy image in the same mode
        # (avoids decoding the actual pixel data).
        dummy = np.asarray(Image.new(mode, (1, 1)))
        pil_shape: Tuple[int, ...] = dummy.shape
        if len(pil_shape) > 2:
            shape = (*shape, *pil_shape[2:])

        return ImageProperties(
            shape=shape,
            dtype=dummy.dtype,
            n_images=n_frames,
            is_batch=index is Ellipsis,
        )
+ " Use the new `PillowPlugin` instead.", + DeprecationWarning, +) + + +def generate_info(): # pragma: no cover + from urllib.request import urlopen + import PIL + from PIL import Image + + Image.init() + + ids = [] + formats = [] + docs = {} + + # Collect formats and their summary from plugin modules + for mod_name in dir(PIL): + if "ImagePlugin" in mod_name: + mod = getattr(PIL, mod_name) + for ob_name in dir(mod): + ob = getattr(mod, ob_name) + if isinstance(ob, type) and issubclass(ob, Image.Image): + if ob.format in ids: + print("Found duplicate for", ob.format) + else: + ids.append(ob.format) + formats.append((ob.format, ob.format_description)) + + # Add extension info + for i in range(len(formats)): + id, summary = formats[i] + ext = " ".join([e for e in Image.EXTENSION if Image.EXTENSION[e] == id]) + formats[i] = id, summary, ext + + # Get documentation of formats + url = "https://raw.githubusercontent.com/python-pillow/Pillow/master/docs/handbook/image-file-formats.rst" # noqa + lines = urlopen(url).read().decode().splitlines() + lines.append("End") + lines.append("---") # for the end + + # Parse documentation + cur_name = "" + cur_part = [] + for i in range(len(lines)): + line = lines[i] + if line.startswith(("^^^", "---", "===")): + if cur_name and cur_name in ids: + text = "\n".join(cur_part[:-1]) + text = text.replace("versionadded::", "versionadded:: Pillow ") + text = text.replace("Image.open`", "Image.write`") + docs[cur_name] = text + cur_part = [] + cur_name = lines[i - 1].strip().replace(" ", "").upper() + else: + cur_part.append(" " + line) + + # Fill in the blancs + for id in ids: + if id in docs: + docs[id] = "*From the Pillow docs:*\n\n" + docs[id] + else: + docs[id] = "No docs for %s." % id + print("no docs for", id) + + # Sort before writing + formats.sort(key=lambda x: x[0]) + ids.sort() + + # Read file ... 
+ code = open(__file__, "rb").read().decode() + code, divider, _ = code.partition("## BELOW IS " + "AUTOGENERATED") + code += divider + "\n\n" + + # Write formats + code += "pillow_formats = [\n" + for i in range(len(formats)): + print(formats[i]) + code += " (%r, %r, %r),\n" % formats[i] + code += " ]\n\n\n" + + # Write docs + code += "pillow_docs = {\n" + for id in ids: + code += '%r:\nu"""%s""",\n' % (id, docs[id]) + code += "}\n" + + # Write back + with open(__file__, "wb") as f: + f.write(code.encode()) + + +if __name__ == "__main__": + generate_info() + + +# BELOW IS AUTOGENERATED + +pillow_formats = [ + ("BMP", "Windows Bitmap", ".bmp"), + ("BUFR", "BUFR", ".bufr"), + ("CUR", "Windows Cursor", ".cur"), + ("DCX", "Intel DCX", ".dcx"), + ("DDS", "DirectDraw Surface", ".dds"), + ("DIB", "Windows Bitmap", ""), + ("EPS", "Encapsulated Postscript", ".ps .eps"), + ("FITS", "FITS", ".fit .fits"), + ("FLI", "Autodesk FLI/FLC Animation", ".fli .flc"), + ("FPX", "FlashPix", ".fpx"), + ("FTEX", "Texture File Format (IW2:EOC)", ".ftc .ftu"), + ("GBR", "GIMP brush file", ".gbr"), + ("GIF", "Compuserve GIF", ".gif"), + ("GRIB", "GRIB", ".grib"), + ("HDF5", "HDF5", ".h5 .hdf"), + ("ICNS", "Mac OS icns resource", ".icns"), + ("ICO", "Windows Icon", ".ico"), + ("IM", "IFUNC Image Memory", ".im"), + ("IMT", "IM Tools", ""), + ("IPTC", "IPTC/NAA", ".iim"), + ("JPEG", "JPEG (ISO 10918)", ".jfif .jpe .jpg .jpeg"), + ("JPEG2000", "JPEG 2000 (ISO 15444)", ".jp2 .j2k .jpc .jpf .jpx .j2c"), + ("MCIDAS", "McIdas area file", ""), + ("MIC", "Microsoft Image Composer", ".mic"), + ("MPEG", "MPEG", ".mpg .mpeg"), + ("MPO", "MPO (CIPA DC-007)", ".mpo"), + ("MSP", "Windows Paint", ".msp"), + ("PCD", "Kodak PhotoCD", ".pcd"), + ("PCX", "Paintbrush", ".pcx"), + ("PIXAR", "PIXAR raster image", ".pxr"), + ("PNG", "Portable network graphics", ".png"), + ("PPM", "Pbmplus image", ".pbm .pgm .ppm"), + ("PSD", "Adobe Photoshop", ".psd"), + ("SGI", "SGI Image File Format", ".bw .rgb .rgba .sgi"), + 
("SPIDER", "Spider 2D image", ""), + ("SUN", "Sun Raster File", ".ras"), + ("TGA", "Targa", ".tga"), + ("TIFF", "Adobe TIFF", ".tif .tiff"), + ("WMF", "Windows Metafile", ".wmf .emf"), + ("XBM", "X11 Bitmap", ".xbm"), + ("XPM", "X11 Pixel Map", ".xpm"), + ("XVThumb", "XV thumbnail image", ""), +] + + +pillow_docs = { + "BMP": """*From the Pillow docs:* + + + PIL reads and writes Windows and OS/2 BMP files containing ``1``, ``L``, ``P``, + or ``RGB`` data. 16-colour images are read as ``P`` images. Run-length encoding + is not supported. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **compression** + Set to ``bmp_rle`` if the file is run-length encoded. + """, + "BUFR": """*From the Pillow docs:* + + + .. versionadded:: Pillow 1.1.3 + + PIL provides a stub driver for BUFR files. + + To add read or write support to your application, use + :py:func:`PIL.BufrStubImagePlugin.register_handler`. + """, + "CUR": """*From the Pillow docs:* + + + CUR is used to store cursors on Windows. The CUR decoder reads the largest + available cursor. Animated cursors are not supported. + """, + "DCX": """*From the Pillow docs:* + + + DCX is a container file format for PCX files, defined by Intel. The DCX format + is commonly used in fax applications. The DCX decoder can read files containing + ``1``, ``L``, ``P``, or ``RGB`` data. + + When the file is opened, only the first image is read. You can use + :py:meth:`~file.seek` or :py:mod:`~PIL.ImageSequence` to read other images. + + """, + "DDS": """*From the Pillow docs:* + + + DDS is a popular container texture format used in video games and natively + supported by DirectX. + Currently, DXT1, DXT3, and DXT5 pixel formats are supported and only in ``RGBA`` + mode. + + .. 
versionadded:: Pillow 3.4.0 DXT3 + """, + "DIB": """No docs for DIB.""", + "EPS": """*From the Pillow docs:* + + + PIL identifies EPS files containing image data, and can read files that contain + embedded raster images (ImageData descriptors). If Ghostscript is available, + other EPS files can be read as well. The EPS driver can also write EPS + images. The EPS driver can read EPS images in ``L``, ``LAB``, ``RGB`` and + ``CMYK`` mode, but Ghostscript may convert the images to ``RGB`` mode rather + than leaving them in the original color space. The EPS driver can write images + in ``L``, ``RGB`` and ``CMYK`` modes. + + If Ghostscript is available, you can call the :py:meth:`~PIL.Image.Image.load` + method with the following parameter to affect how Ghostscript renders the EPS + + **scale** + Affects the scale of the resultant rasterized image. If the EPS suggests + that the image be rendered at 100px x 100px, setting this parameter to + 2 will make the Ghostscript render a 200px x 200px image instead. The + relative position of the bounding box is maintained:: + + im = Image.open(...) + im.size #(100,100) + im.load(scale=2) + im.size #(200,200) + """, + "FITS": """*From the Pillow docs:* + + + .. versionadded:: Pillow 1.1.5 + + PIL provides a stub driver for FITS files. + + To add read or write support to your application, use + :py:func:`PIL.FitsStubImagePlugin.register_handler`. + """, + "FLI": """No docs for FLI.""", + "FPX": """*From the Pillow docs:* + + + PIL reads Kodak FlashPix files. In the current version, only the highest + resolution image is read from the file, and the viewing transform is not taken + into account. + + .. note:: + + To enable full FlashPix support, you need to build and install the IJG JPEG + library before building the Python Imaging Library. See the distribution + README for details. + """, + "FTEX": """*From the Pillow docs:* + + + .. 
versionadded:: Pillow 3.2.0 + + The FTEX decoder reads textures used for 3D objects in + Independence War 2: Edge Of Chaos. The plugin reads a single texture + per file, in the compressed and uncompressed formats. + """, + "GBR": """*From the Pillow docs:* + + + The GBR decoder reads GIMP brush files, version 1 and 2. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **comment** + The brush name. + + **spacing** + The spacing between the brushes, in pixels. Version 2 only. + + GD + ^^ + + PIL reads uncompressed GD files. Note that this file format cannot be + automatically identified, so you must use :py:func:`PIL.GdImageFile.open` to + read such a file. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **transparency** + Transparency color index. This key is omitted if the image is not + transparent. + """, + "GIF": """*From the Pillow docs:* + + + PIL reads GIF87a and GIF89a versions of the GIF file format. The library writes + run-length encoded files in GIF87a by default, unless GIF89a features + are used or GIF89a is already in use. + + Note that GIF files are always read as grayscale (``L``) + or palette mode (``P``) images. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **background** + Default background color (a palette color index). + + **transparency** + Transparency color index. This key is omitted if the image is not + transparent. + + **version** + Version (either ``GIF87a`` or ``GIF89a``). + + **duration** + May not be present. The time to display the current frame + of the GIF, in milliseconds. + + **loop** + May not be present. The number of times the GIF should loop. + + Reading sequences + ~~~~~~~~~~~~~~~~~ + + The GIF loader supports the :py:meth:`~file.seek` and :py:meth:`~file.tell` + methods. 
You can seek to the next frame (``im.seek(im.tell() + 1)``), or rewind + the file by seeking to the first frame. Random access is not supported. + + ``im.seek()`` raises an ``EOFError`` if you try to seek after the last frame. + + Saving + ~~~~~~ + + When calling :py:meth:`~PIL.Image.Image.save`, the following options + are available:: + + im.save(out, save_all=True, append_images=[im1, im2, ...]) + + **save_all** + If present and true, all frames of the image will be saved. If + not, then only the first frame of a multiframe image will be saved. + + **append_images** + A list of images to append as additional frames. Each of the + images in the list can be single or multiframe images. + This is currently only supported for GIF, PDF, TIFF, and WebP. + + **duration** + The display duration of each frame of the multiframe gif, in + milliseconds. Pass a single integer for a constant duration, or a + list or tuple to set the duration for each frame separately. + + **loop** + Integer number of times the GIF should loop. + + **optimize** + If present and true, attempt to compress the palette by + eliminating unused colors. This is only useful if the palette can + be compressed to the next smaller power of 2 elements. + + **palette** + Use the specified palette for the saved image. The palette should + be a bytes or bytearray object containing the palette entries in + RGBRGB... form. It should be no more than 768 bytes. Alternately, + the palette can be passed in as an + :py:class:`PIL.ImagePalette.ImagePalette` object. + + **disposal** + Indicates the way in which the graphic is to be treated after being displayed. + + * 0 - No disposal specified. + * 1 - Do not dispose. + * 2 - Restore to background color. + * 3 - Restore to previous content. + + Pass a single integer for a constant disposal, or a list or tuple + to set the disposal for each frame separately. 
+ + Reading local images + ~~~~~~~~~~~~~~~~~~~~ + + The GIF loader creates an image memory the same size as the GIF file’s *logical + screen size*, and pastes the actual pixel data (the *local image*) into this + image. If you only want the actual pixel rectangle, you can manipulate the + :py:attr:`~PIL.Image.Image.size` and :py:attr:`~PIL.Image.Image.tile` + attributes before loading the file:: + + im = Image.open(...) + + if im.tile[0][0] == "gif": + # only read the first "local image" from this GIF file + tag, (x0, y0, x1, y1), offset, extra = im.tile[0] + im.size = (x1 - x0, y1 - y0) + im.tile = [(tag, (0, 0) + im.size, offset, extra)] + """, + "GRIB": """*From the Pillow docs:* + + + .. versionadded:: Pillow 1.1.5 + + PIL provides a stub driver for GRIB files. + + The driver requires the file to start with a GRIB header. If you have files + with embedded GRIB data, or files with multiple GRIB fields, your application + has to seek to the header before passing the file handle to PIL. + + To add read or write support to your application, use + :py:func:`PIL.GribStubImagePlugin.register_handler`. + """, + "HDF5": """*From the Pillow docs:* + + + .. versionadded:: Pillow 1.1.5 + + PIL provides a stub driver for HDF5 files. + + To add read or write support to your application, use + :py:func:`PIL.Hdf5StubImagePlugin.register_handler`. + """, + "ICNS": """*From the Pillow docs:* + + + PIL reads and (macOS only) writes macOS ``.icns`` files. By default, the + largest available icon is read, though you can override this by setting the + :py:attr:`~PIL.Image.Image.size` property before calling + :py:meth:`~PIL.Image.Image.load`. The :py:meth:`~PIL.Image.Image.write` method + sets the following :py:attr:`~PIL.Image.Image.info` property: + + **sizes** + A list of supported sizes found in this icon file; these are a + 3-tuple, ``(width, height, scale)``, where ``scale`` is 2 for a retina + icon and 1 for a standard icon. 
You *are* permitted to use this 3-tuple + format for the :py:attr:`~PIL.Image.Image.size` property if you set it + before calling :py:meth:`~PIL.Image.Image.load`; after loading, the size + will be reset to a 2-tuple containing pixel dimensions (so, e.g. if you + ask for ``(512, 512, 2)``, the final value of + :py:attr:`~PIL.Image.Image.size` will be ``(1024, 1024)``). + """, + "ICO": """*From the Pillow docs:* + + + ICO is used to store icons on Windows. The largest available icon is read. + + The :py:meth:`~PIL.Image.Image.save` method supports the following options: + + **sizes** + A list of sizes including in this ico file; these are a 2-tuple, + ``(width, height)``; Default to ``[(16, 16), (24, 24), (32, 32), (48, 48), + (64, 64), (128, 128), (256, 256)]``. Any sizes bigger than the original + size or 256 will be ignored. + + IM + ^^ + + IM is a format used by LabEye and other applications based on the IFUNC image + processing library. The library reads and writes most uncompressed interchange + versions of this format. + + IM is the only format that can store all internal PIL formats. + """, + "IM": """No docs for IM.""", + "IMT": """*From the Pillow docs:* + + + PIL reads Image Tools images containing ``L`` data. + """, + "IPTC": """No docs for IPTC.""", + "JPEG": """*From the Pillow docs:* + + + PIL reads JPEG, JFIF, and Adobe JPEG files containing ``L``, ``RGB``, or + ``CMYK`` data. It writes standard and progressive JFIF files. + + Using the :py:meth:`~PIL.Image.Image.draft` method, you can speed things up by + converting ``RGB`` images to ``L``, and resize images to 1/2, 1/4 or 1/8 of + their original size while loading them. + + The :py:meth:`~PIL.Image.Image.write` method may set the following + :py:attr:`~PIL.Image.Image.info` properties if available: + + **jfif** + JFIF application marker found. If the file is not a JFIF file, this key is + not present. + + **jfif_version** + A tuple representing the jfif version, (major version, minor version). 
+ + **jfif_density** + A tuple representing the pixel density of the image, in units specified + by jfif_unit. + + **jfif_unit** + Units for the jfif_density: + + * 0 - No Units + * 1 - Pixels per Inch + * 2 - Pixels per Centimeter + + **dpi** + A tuple representing the reported pixel density in pixels per inch, if + the file is a jfif file and the units are in inches. + + **adobe** + Adobe application marker found. If the file is not an Adobe JPEG file, this + key is not present. + + **adobe_transform** + Vendor Specific Tag. + + **progression** + Indicates that this is a progressive JPEG file. + + **icc_profile** + The ICC color profile for the image. + + **exif** + Raw EXIF data from the image. + + + The :py:meth:`~PIL.Image.Image.save` method supports the following options: + + **quality** + The image quality, on a scale from 1 (worst) to 95 (best). The default is + 75. Values above 95 should be avoided; 100 disables portions of the JPEG + compression algorithm, and results in large files with hardly any gain in + image quality. + + **optimize** + If present and true, indicates that the encoder should make an extra pass + over the image in order to select optimal encoder settings. + + **progressive** + If present and true, indicates that this image should be stored as a + progressive JPEG file. + + **dpi** + A tuple of integers representing the pixel density, ``(x,y)``. + + **icc_profile** + If present and true, the image is stored with the provided ICC profile. + If this parameter is not provided, the image will be saved with no profile + attached. To preserve the existing profile:: + + im.save(filename, 'jpeg', icc_profile=im.info.get('icc_profile')) + + **exif** + If present, the image will be stored with the provided raw EXIF data. + + **subsampling** + If present, sets the subsampling for the encoder. + + * ``keep``: Only valid for JPEG files, will retain the original image setting. 
+ * ``4:4:4``, ``4:2:2``, ``4:2:0``: Specific sampling values + * ``-1``: equivalent to ``keep`` + * ``0``: equivalent to ``4:4:4`` + * ``1``: equivalent to ``4:2:2`` + * ``2``: equivalent to ``4:2:0`` + + **qtables** + If present, sets the qtables for the encoder. This is listed as an + advanced option for wizards in the JPEG documentation. Use with + caution. ``qtables`` can be one of several types of values: + + * a string, naming a preset, e.g. ``keep``, ``web_low``, or ``web_high`` + * a list, tuple, or dictionary (with integer keys = + range(len(keys))) of lists of 64 integers. There must be + between 2 and 4 tables. + + .. versionadded:: Pillow 2.5.0 + + + .. note:: + + To enable JPEG support, you need to build and install the IJG JPEG library + before building the Python Imaging Library. See the distribution README for + details. + """, + "JPEG2000": """*From the Pillow docs:* + + + .. versionadded:: Pillow 2.4.0 + + PIL reads and writes JPEG 2000 files containing ``L``, ``LA``, ``RGB`` or + ``RGBA`` data. It can also read files containing ``YCbCr`` data, which it + converts on read into ``RGB`` or ``RGBA`` depending on whether or not there is + an alpha channel. PIL supports JPEG 2000 raw codestreams (``.j2k`` files), as + well as boxed JPEG 2000 files (``.j2p`` or ``.jpx`` files). PIL does *not* + support files whose components have different sampling frequencies. + + When loading, if you set the ``mode`` on the image prior to the + :py:meth:`~PIL.Image.Image.load` method being invoked, you can ask PIL to + convert the image to either ``RGB`` or ``RGBA`` rather than choosing for + itself. It is also possible to set ``reduce`` to the number of resolutions to + discard (each one reduces the size of the resulting image by a factor of 2), + and ``layers`` to specify the number of quality layers to load. + + The :py:meth:`~PIL.Image.Image.save` method supports the following options: + + **offset** + The image offset, as a tuple of integers, e.g. 
(16, 16) + + **tile_offset** + The tile offset, again as a 2-tuple of integers. + + **tile_size** + The tile size as a 2-tuple. If not specified, or if set to None, the + image will be saved without tiling. + + **quality_mode** + Either `"rates"` or `"dB"` depending on the units you want to use to + specify image quality. + + **quality_layers** + A sequence of numbers, each of which represents either an approximate size + reduction (if quality mode is `"rates"`) or a signal to noise ratio value + in decibels. If not specified, defaults to a single layer of full quality. + + **num_resolutions** + The number of different image resolutions to be stored (which corresponds + to the number of Discrete Wavelet Transform decompositions plus one). + + **codeblock_size** + The code-block size as a 2-tuple. Minimum size is 4 x 4, maximum is 1024 x + 1024, with the additional restriction that no code-block may have more + than 4096 coefficients (i.e. the product of the two numbers must be no + greater than 4096). + + **precinct_size** + The precinct size as a 2-tuple. Must be a power of two along both axes, + and must be greater than the code-block size. + + **irreversible** + If ``True``, use the lossy Irreversible Color Transformation + followed by DWT 9-7. Defaults to ``False``, which means to use the + Reversible Color Transformation with DWT 5-3. + + **progression** + Controls the progression order; must be one of ``"LRCP"``, ``"RLCP"``, + ``"RPCL"``, ``"PCRL"``, ``"CPRL"``. The letters stand for Component, + Position, Resolution and Layer respectively and control the order of + encoding, the idea being that e.g. an image encoded using LRCP mode can + have its quality layers decoded as they arrive at the decoder, while one + encoded using RLCP mode will have increasing resolutions decoded as they + arrive, and so on. + + **cinema_mode** + Set the encoder to produce output compliant with the digital cinema + specifications. 
The options here are ``"no"`` (the default), + ``"cinema2k-24"`` for 24fps 2K, ``"cinema2k-48"`` for 48fps 2K, and + ``"cinema4k-24"`` for 24fps 4K. Note that for compliant 2K files, + *at least one* of your image dimensions must match 2048 x 1080, while + for compliant 4K files, *at least one* of the dimensions must match + 4096 x 2160. + + .. note:: + + To enable JPEG 2000 support, you need to build and install the OpenJPEG + library, version 2.0.0 or higher, before building the Python Imaging + Library. + + Windows users can install the OpenJPEG binaries available on the + OpenJPEG website, but must add them to their PATH in order to use PIL (if + you fail to do this, you will get errors about not being able to load the + ``_imaging`` DLL). + """, + "MCIDAS": """*From the Pillow docs:* + + + PIL identifies and reads 8-bit McIdas area files. + """, + "MIC": """*From the Pillow docs:* + + + PIL identifies and reads Microsoft Image Composer (MIC) files. When opened, the + first sprite in the file is loaded. You can use :py:meth:`~file.seek` and + :py:meth:`~file.tell` to read other sprites from the file. + + Note that there may be an embedded gamma of 2.2 in MIC files. + """, + "MPEG": """*From the Pillow docs:* + + + PIL identifies MPEG files. + """, + "MPO": """*From the Pillow docs:* + + + Pillow identifies and reads Multi Picture Object (MPO) files, loading the primary + image when first opened. The :py:meth:`~file.seek` and :py:meth:`~file.tell` + methods may be used to read other pictures from the file. The pictures are + zero-indexed and random access is supported. + """, + "MSP": """*From the Pillow docs:* + + + PIL identifies and reads MSP files from Windows 1 and 2. The library writes + uncompressed (Windows 1) versions of this format. + """, + "PCD": """*From the Pillow docs:* + + + PIL reads PhotoCD files containing ``RGB`` data. This only reads the 768x512 + resolution image from the file. Higher resolutions are encoded in a proprietary + encoding. 
+ """, + "PCX": """*From the Pillow docs:* + + + PIL reads and writes PCX files containing ``1``, ``L``, ``P``, or ``RGB`` data. + """, + "PIXAR": """*From the Pillow docs:* + + + PIL provides limited support for PIXAR raster files. The library can identify + and read “dumped” RGB files. + + The format code is ``PIXAR``. + """, + "PNG": """*From the Pillow docs:* + + + PIL identifies, reads, and writes PNG files containing ``1``, ``L``, ``P``, + ``RGB``, or ``RGBA`` data. Interlaced files are supported as of v1.1.7. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties, when appropriate: + + **chromaticity** + The chromaticity points, as an 8 tuple of floats. (``White Point + X``, ``White Point Y``, ``Red X``, ``Red Y``, ``Green X``, ``Green + Y``, ``Blue X``, ``Blue Y``) + + **gamma** + Gamma, given as a floating point number. + + **srgb** + The sRGB rendering intent as an integer. + + * 0 Perceptual + * 1 Relative Colorimetric + * 2 Saturation + * 3 Absolute Colorimetric + + **transparency** + For ``P`` images: Either the palette index for full transparent pixels, + or a byte string with alpha values for each palette entry. + + For ``L`` and ``RGB`` images, the color that represents full transparent + pixels in this image. + + This key is omitted if the image is not a transparent palette image. + + ``Open`` also sets ``Image.text`` to a list of the values of the + ``tEXt``, ``zTXt``, and ``iTXt`` chunks of the PNG image. Individual + compressed chunks are limited to a decompressed size of + ``PngImagePlugin.MAX_TEXT_CHUNK``, by default 1MB, to prevent + decompression bombs. Additionally, the total size of all of the text + chunks is limited to ``PngImagePlugin.MAX_TEXT_MEMORY``, defaulting to + 64MB. + + The :py:meth:`~PIL.Image.Image.save` method supports the following options: + + **optimize** + If present and true, instructs the PNG writer to make the output file as + small as possible. 
This includes extra processing in order to find optimal + encoder settings. + + **transparency** + For ``P``, ``L``, and ``RGB`` images, this option controls what + color image to mark as transparent. + + For ``P`` images, this can be a either the palette index, + or a byte string with alpha values for each palette entry. + + **dpi** + A tuple of two numbers corresponding to the desired dpi in each direction. + + **pnginfo** + A :py:class:`PIL.PngImagePlugin.PngInfo` instance containing text tags. + + **compress_level** + ZLIB compression level, a number between 0 and 9: 1 gives best speed, + 9 gives best compression, 0 gives no compression at all. Default is 6. + When ``optimize`` option is True ``compress_level`` has no effect + (it is set to 9 regardless of a value passed). + + **icc_profile** + The ICC Profile to include in the saved file. + + **bits (experimental)** + For ``P`` images, this option controls how many bits to store. If omitted, + the PNG writer uses 8 bits (256 colors). + + **dictionary (experimental)** + Set the ZLIB encoder dictionary. + + .. note:: + + To enable PNG support, you need to build and install the ZLIB compression + library before building the Python Imaging Library. See the installation + documentation for details. + """, + "PPM": """*From the Pillow docs:* + + + PIL reads and writes PBM, PGM and PPM files containing ``1``, ``L`` or ``RGB`` + data. + """, + "PSD": """*From the Pillow docs:* + + + PIL identifies and reads PSD files written by Adobe Photoshop 2.5 and 3.0. + + """, + "SGI": """*From the Pillow docs:* + + + Pillow reads and writes uncompressed ``L``, ``RGB``, and ``RGBA`` files. + + """, + "SPIDER": """*From the Pillow docs:* + + + PIL reads and writes SPIDER image files of 32-bit floating point data + ("F;32F"). + + PIL also reads SPIDER stack files containing sequences of SPIDER images. The + :py:meth:`~file.seek` and :py:meth:`~file.tell` methods are supported, and + random access is allowed. 
+ + The :py:meth:`~PIL.Image.Image.write` method sets the following attributes: + + **format** + Set to ``SPIDER`` + + **istack** + Set to 1 if the file is an image stack, else 0. + + **nimages** + Set to the number of images in the stack. + + A convenience method, :py:meth:`~PIL.Image.Image.convert2byte`, is provided for + converting floating point data to byte data (mode ``L``):: + + im = Image.open('image001.spi').convert2byte() + + Writing files in SPIDER format + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + The extension of SPIDER files may be any 3 alphanumeric characters. Therefore + the output format must be specified explicitly:: + + im.save('newimage.spi', format='SPIDER') + + For more information about the SPIDER image processing package, see the + `SPIDER homepage`_ at `Wadsworth Center`_. + + .. _SPIDER homepage: https://spider.wadsworth.org/spider_doc/spider/docs/spider.html + .. _Wadsworth Center: https://www.wadsworth.org/ + """, + "SUN": """No docs for SUN.""", + "TGA": """*From the Pillow docs:* + + + PIL reads 24- and 32-bit uncompressed and run-length encoded TGA files. + """, + "TIFF": """*From the Pillow docs:* + + + Pillow reads and writes TIFF files. It can read both striped and tiled + images, pixel and plane interleaved multi-band images. If you have + libtiff and its headers installed, PIL can read and write many kinds + of compressed TIFF files. If not, PIL will only read and write + uncompressed files. + + .. note:: + + Beginning in version 5.0.0, Pillow requires libtiff to read or + write compressed files. Prior to that release, Pillow had buggy + support for reading Packbits, LZW and JPEG compressed TIFFs + without using libtiff. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **compression** + Compression mode. + + .. versionadded:: Pillow 2.0.0 + + **dpi** + Image resolution as an ``(xdpi, ydpi)`` tuple, where applicable. 
You can use + the :py:attr:`~PIL.Image.Image.tag` attribute to get more detailed + information about the image resolution. + + .. versionadded:: Pillow 1.1.5 + + **resolution** + Image resolution as an ``(xres, yres)`` tuple, where applicable. This is a + measurement in whichever unit is specified by the file. + + .. versionadded:: Pillow 1.1.5 + + + The :py:attr:`~PIL.Image.Image.tag_v2` attribute contains a dictionary + of TIFF metadata. The keys are numerical indexes from + :py:attr:`~PIL.TiffTags.TAGS_V2`. Values are strings or numbers for single + items, multiple values are returned in a tuple of values. Rational + numbers are returned as a :py:class:`~PIL.TiffImagePlugin.IFDRational` + object. + + .. versionadded:: Pillow 3.0.0 + + For compatibility with legacy code, the + :py:attr:`~PIL.Image.Image.tag` attribute contains a dictionary of + decoded TIFF fields as returned prior to version 3.0.0. Values are + returned as either strings or tuples of numeric values. Rational + numbers are returned as a tuple of ``(numerator, denominator)``. + + .. deprecated:: 3.0.0 + + + Saving Tiff Images + ~~~~~~~~~~~~~~~~~~ + + The :py:meth:`~PIL.Image.Image.save` method can take the following keyword arguments: + + **save_all** + If true, Pillow will save all frames of the image to a multiframe tiff document. + + .. versionadded:: Pillow 3.4.0 + + **tiffinfo** + A :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` object or dict + object containing tiff tags and values. The TIFF field type is + autodetected for Numeric and string values, any other types + require using an :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` + object and setting the type in + :py:attr:`~PIL.TiffImagePlugin.ImageFileDirectory_v2.tagtype` with + the appropriate numerical value from + ``TiffTags.TYPES``. + + .. versionadded:: Pillow 2.3.0 + + Metadata values that are of the rational type should be passed in + using a :py:class:`~PIL.TiffImagePlugin.IFDRational` object. + + .. 
versionadded:: Pillow 3.1.0 + + For compatibility with legacy code, a + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` object may + be passed in this field. However, this is deprecated. + + .. versionadded:: Pillow 3.0.0 + + .. note:: + + Only some tags are currently supported when writing using + libtiff. The supported list is found in + :py:attr:`~PIL:TiffTags.LIBTIFF_CORE`. + + **compression** + A string containing the desired compression method for the + file. (valid only with libtiff installed) Valid compression + methods are: ``None``, ``"tiff_ccitt"``, ``"group3"``, + ``"group4"``, ``"tiff_jpeg"``, ``"tiff_adobe_deflate"``, + ``"tiff_thunderscan"``, ``"tiff_deflate"``, ``"tiff_sgilog"``, + ``"tiff_sgilog24"``, ``"tiff_raw_16"`` + + These arguments to set the tiff header fields are an alternative to + using the general tags available through tiffinfo. + + **description** + + **software** + + **date_time** + + **artist** + + **copyright** + Strings + + **resolution_unit** + A string of "inch", "centimeter" or "cm" + + **resolution** + + **x_resolution** + + **y_resolution** + + **dpi** + Either a Float, 2 tuple of (numerator, denominator) or a + :py:class:`~PIL.TiffImagePlugin.IFDRational`. Resolution implies + an equal x and y resolution, dpi also implies a unit of inches. + + """, + "WMF": """*From the Pillow docs:* + + + PIL can identify playable WMF files. + + In PIL 1.1.4 and earlier, the WMF driver provides some limited rendering + support, but not enough to be useful for any real application. + + In PIL 1.1.5 and later, the WMF driver is a stub driver. To add WMF read or + write support to your application, use + :py:func:`PIL.WmfImagePlugin.register_handler` to register a WMF handler. + + :: + + from PIL import Image + from PIL import WmfImagePlugin + + class WmfHandler: + def open(self, im): + ... + def load(self, im): + ... + return image + def save(self, im, fp, filename): + ... 
+ + wmf_handler = WmfHandler() + + WmfImagePlugin.register_handler(wmf_handler) + + im = Image.open("sample.wmf")""", + "XBM": """*From the Pillow docs:* + + + PIL reads and writes X bitmap files (mode ``1``). + """, + "XPM": """*From the Pillow docs:* + + + PIL reads X pixmap files (mode ``P``) with 256 colors or less. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **transparency** + Transparency color index. This key is omitted if the image is not + transparent. + """, + "XVThumb": """No docs for XVThumb.""", +} diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/pillow_legacy.py b/parrot/lib/python3.10/site-packages/imageio/plugins/pillow_legacy.py new file mode 100644 index 0000000000000000000000000000000000000000..75637ba5122e917aa178a70c82c4f94fd2042ba8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/pillow_legacy.py @@ -0,0 +1,823 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Read/Write images using pillow/PIL (legacy). + +Backend Library: `Pillow `_ + +Pillow is a friendly fork of PIL (Python Image Library) and supports +reading and writing of common formats (jpg, png, gif, tiff, ...). While +these docs provide an overview of some of its features, pillow is +constantly improving. Hence, the complete list of features can be found +in pillows official docs (see the Backend Library link). 
+ +Parameters for Reading +---------------------- +pilmode : str + (Available for all formats except GIF-PIL) + From the Pillow documentation: + + * 'L' (8-bit pixels, grayscale) + * 'P' (8-bit pixels, mapped to any other mode using a color palette) + * 'RGB' (3x8-bit pixels, true color) + * 'RGBA' (4x8-bit pixels, true color with transparency mask) + * 'CMYK' (4x8-bit pixels, color separation) + * 'YCbCr' (3x8-bit pixels, color video format) + * 'I' (32-bit signed integer pixels) + * 'F' (32-bit floating point pixels) + + PIL also provides limited support for a few special modes, including + 'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa' + (true color with premultiplied alpha). + + When translating a color image to grayscale (mode 'L', 'I' or 'F'), + the library uses the ITU-R 601-2 luma transform:: + + L = R * 299/1000 + G * 587/1000 + B * 114/1000 +as_gray : bool + (Available for all formats except GIF-PIL) + If True, the image is converted using mode 'F'. When `mode` is + not None and `as_gray` is True, the image is first converted + according to `mode`, and the result is then "flattened" using + mode 'F'. +ignoregamma : bool + (Only available in PNG-PIL) + Avoid gamma correction. Default True. +exifrotate : bool + (Only available in JPEG-PIL) + Automatically rotate the image according to exif flag. Default True. + + +Parameters for saving +--------------------- +optimize : bool + (Only available in PNG-PIL) + If present and true, instructs the PNG writer to make the output file + as small as possible. This includes extra processing in order to find + optimal encoder settings. +transparency: + (Only available in PNG-PIL) + This option controls what color image to mark as transparent. +dpi: tuple of two scalars + (Only available in PNG-PIL) + The desired dpi in each direction. +pnginfo: PIL.PngImagePlugin.PngInfo + (Only available in PNG-PIL) + Object containing text tags. 
+compress_level: int + (Only available in PNG-PIL) + ZLIB compression level, a number between 0 and 9: 1 gives best speed, + 9 gives best compression, 0 gives no compression at all. Default is 9. + When ``optimize`` option is True ``compress_level`` has no effect + (it is set to 9 regardless of a value passed). +compression: int + (Only available in PNG-PIL) + Compatibility with the freeimage PNG format. If given, it overrides + compress_level. +icc_profile: + (Only available in PNG-PIL) + The ICC Profile to include in the saved file. +bits (experimental): int + (Only available in PNG-PIL) + This option controls how many bits to store. If omitted, + the PNG writer uses 8 bits (256 colors). +quantize: + (Only available in PNG-PIL) + Compatibility with the freeimage PNG format. If given, it overrides + bits. In this case, given as a number between 1-256. +dictionary (experimental): dict + (Only available in PNG-PIL) + Set the ZLIB encoder dictionary. +prefer_uint8: bool + (Only available in PNG-PIL) + Let the PNG writer truncate uint16 image arrays to uint8 if their values fall + within the range [0, 255]. Defaults to true for legacy compatibility, however + it is recommended to set this to false to avoid unexpected behavior when + saving e.g. weakly saturated images. + +quality : scalar + (Only available in JPEG-PIL) + The compression factor of the saved image (1..100), higher + numbers result in higher quality but larger file size. Default 75. +progressive : bool + (Only available in JPEG-PIL) + Save as a progressive JPEG file (e.g. for images on the web). + Default False. +optimize : bool + (Only available in JPEG-PIL) + On saving, compute optimal Huffman coding tables (can reduce a few + percent of file size). Default False. +dpi : tuple of int + (Only available in JPEG-PIL) + The pixel density, ``(x,y)``. +icc_profile : object + (Only available in JPEG-PIL) + If present and true, the image is stored with the provided ICC profile. 
+ If this parameter is not provided, the image will be saved with no + profile attached. +exif : dict + (Only available in JPEG-PIL) + If present, the image will be stored with the provided raw EXIF data. +subsampling : str + (Only available in JPEG-PIL) + Sets the subsampling for the encoder. See Pillow docs for details. +qtables : object + (Only available in JPEG-PIL) + Set the qtables for the encoder. See Pillow docs for details. +quality_mode : str + (Only available in JPEG2000-PIL) + Either `"rates"` or `"dB"` depending on the units you want to use to + specify image quality. +quality : float + (Only available in JPEG2000-PIL) + Approximate size reduction (if quality mode is `rates`) or a signal to noise ratio + in decibels (if quality mode is `dB`). +loop : int + (Only available in GIF-PIL) + The number of iterations. Default 0 (meaning loop indefinitely). +duration : {float, list} + (Only available in GIF-PIL) + The duration (in milliseconds) of each frame. Either specify one value + that is used for all frames, or one value for each frame. +fps : float + (Only available in GIF-PIL) + The number of frames per second. If duration is not given, the + duration for each frame is set to 1/fps. Default 10. +palettesize : int + (Only available in GIF-PIL) + The number of colors to quantize the image to. Is rounded to + the nearest power of two. Default 256. +subrectangles : bool + (Only available in GIF-PIL) + If True, will try and optimize the GIF by storing only the + rectangular parts of each frame that change with respect to the + previous. Default False. + +Notes +----- +To enable JPEG 2000 support, you need to build and install the OpenJPEG library, +version 2.0.0 or higher, before building the Python Imaging Library. Windows +users can install the OpenJPEG binaries available on the OpenJPEG website, but +must add them to their PATH in order to use PIL (if you fail to do this, you +will get errors about not being able to load the ``_imaging`` DLL). 
+ +GIF images read with this plugin are always RGBA. The alpha channel is ignored +when saving RGB images. +""" + +import logging +import threading + +import numpy as np + +from ..core import Format, image_as_uint +from ..core.request import URI_FILE, URI_BYTES + + +logger = logging.getLogger(__name__) + + +# todo: Pillow ImageGrab module supports grabbing the screen on Win and OSX. + + +GENERIC_DOCS = """ + Parameters for reading + ---------------------- + + pilmode : str + From the Pillow documentation: + + * 'L' (8-bit pixels, grayscale) + * 'P' (8-bit pixels, mapped to any other mode using a color palette) + * 'RGB' (3x8-bit pixels, true color) + * 'RGBA' (4x8-bit pixels, true color with transparency mask) + * 'CMYK' (4x8-bit pixels, color separation) + * 'YCbCr' (3x8-bit pixels, color video format) + * 'I' (32-bit signed integer pixels) + * 'F' (32-bit floating point pixels) + + PIL also provides limited support for a few special modes, including + 'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa' + (true color with premultiplied alpha). + + When translating a color image to grayscale (mode 'L', 'I' or 'F'), + the library uses the ITU-R 601-2 luma transform:: + + L = R * 299/1000 + G * 587/1000 + B * 114/1000 + as_gray : bool + If True, the image is converted using mode 'F'. When `mode` is + not None and `as_gray` is True, the image is first converted + according to `mode`, and the result is then "flattened" using + mode 'F'. +""" + + +class PillowFormat(Format): + """ + Base format class for Pillow formats. 
+ """ + + _pillow_imported = False + _Image = None + _modes = "i" + _description = "" + + def __init__(self, *args, plugin_id: str = None, **kwargs): + super(PillowFormat, self).__init__(*args, **kwargs) + # Used to synchronize _init_pillow(), see #244 + self._lock = threading.RLock() + + self._plugin_id = plugin_id + + @property + def plugin_id(self): + """The PIL plugin id.""" + return self._plugin_id # Set when format is created + + def _init_pillow(self): + with self._lock: + if not self._pillow_imported: + self._pillow_imported = True # more like tried to import + import PIL + + if not hasattr(PIL, "__version__"): # pragma: no cover + raise ImportError( + "Imageio Pillow plugin requires " "Pillow, not PIL!" + ) + from PIL import Image + + self._Image = Image + elif self._Image is None: # pragma: no cover + raise RuntimeError("Imageio Pillow plugin requires " "Pillow lib.") + Image = self._Image + + if self.plugin_id in ("PNG", "JPEG", "BMP", "GIF", "PPM"): + Image.preinit() + else: + Image.init() + return Image + + def _can_read(self, request): + Image = self._init_pillow() + if self.plugin_id in Image.OPEN: + factory, accept = Image.OPEN[self.plugin_id] + if accept: + if request.firstbytes and accept(request.firstbytes): + return True + + def _can_write(self, request): + Image = self._init_pillow() + if request.extension in self.extensions or request._uri_type in [ + URI_FILE, + URI_BYTES, + ]: + if self.plugin_id in Image.SAVE: + return True + + class Reader(Format.Reader): + def _open(self, pilmode=None, as_gray=False): + Image = self.format._init_pillow() + try: + factory, accept = Image.OPEN[self.format.plugin_id] + except KeyError: + raise RuntimeError("Format %s cannot read images." 
% self.format.name) + self._fp = self._get_file() + self._im = factory(self._fp, "") + if hasattr(Image, "_decompression_bomb_check"): + Image._decompression_bomb_check(self._im.size) + # Save the raw mode used by the palette for a BMP because it may not be the number of channels + # When the data is read, imageio hands the palette to PIL to handle and clears the rawmode argument + # However, there is a bug in PIL with handling animated GIFs with a different color palette on each frame. + # This issue is resolved by using the raw palette data but the rawmode information is now lost. So we + # store the raw mode for later use + if self._im.palette and self._im.palette.dirty: + self._im.palette.rawmode_saved = self._im.palette.rawmode + pil_try_read(self._im) + # Store args + self._kwargs = dict( + as_gray=as_gray, is_gray=_palette_is_grayscale(self._im) + ) + # setting mode=None is not the same as just not providing it + if pilmode is not None: + self._kwargs["mode"] = pilmode + # Set length + self._length = 1 + if hasattr(self._im, "n_frames"): + self._length = self._im.n_frames + + def _get_file(self): + self._we_own_fp = False + return self.request.get_file() + + def _close(self): + save_pillow_close(self._im) + if self._we_own_fp: + self._fp.close() + # else: request object handles closing the _fp + + def _get_length(self): + return self._length + + def _seek(self, index): + try: + self._im.seek(index) + except EOFError: + raise IndexError("Could not seek to index %i" % index) + + def _get_data(self, index): + if index >= self._length: + raise IndexError("Image index %i > %i" % (index, self._length)) + i = self._im.tell() + if i > index: + self._seek(index) # just try + else: + while i < index: # some formats need to be read in sequence + i += 1 + self._seek(i) + if self._im.palette and self._im.palette.dirty: + self._im.palette.rawmode_saved = self._im.palette.rawmode + self._im.getdata()[0] + im = pil_get_frame(self._im, **self._kwargs) + return im, 
self._im.info + + def _get_meta_data(self, index): + if not (index is None or index == 0): + raise IndexError() + return self._im.info + + class Writer(Format.Writer): + def _open(self): + Image = self.format._init_pillow() + try: + self._save_func = Image.SAVE[self.format.plugin_id] + except KeyError: + raise RuntimeError("Format %s cannot write images." % self.format.name) + self._fp = self.request.get_file() + self._meta = {} + self._written = False + + def _close(self): + pass # request object handled closing _fp + + def _append_data(self, im, meta): + if self._written: + raise RuntimeError( + "Format %s only supports single images." % self.format.name + ) + # Pop unit dimension for grayscale images + if im.ndim == 3 and im.shape[-1] == 1: + im = im[:, :, 0] + self._written = True + self._meta.update(meta) + img = ndarray_to_pil( + im, self.format.plugin_id, self._meta.pop("prefer_uint8", True) + ) + if "bits" in self._meta: + img = img.quantize() # Make it a P image, so bits arg is used + img.save(self._fp, format=self.format.plugin_id, **self._meta) + save_pillow_close(img) + + def set_meta_data(self, meta): + self._meta.update(meta) + + +class PNGFormat(PillowFormat): + """See :mod:`imageio.plugins.pillow_legacy`""" + + class Reader(PillowFormat.Reader): + def _open(self, pilmode=None, as_gray=False, ignoregamma=True): + return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray) + + def _get_data(self, index): + im, info = PillowFormat.Reader._get_data(self, index) + if not self.request.kwargs.get("ignoregamma", True): + # The gamma value in the file represents the gamma factor for the + # hardware on the system where the file was created, and is meant + # to be able to match the colors with the system on which the + # image is shown. 
See also issue #366 + try: + gamma = float(info["gamma"]) + except (KeyError, ValueError): + pass + else: + scale = float(65536 if im.dtype == np.uint16 else 255) + gain = 1.0 + im[:] = ((im / scale) ** gamma) * scale * gain + 0.4999 + return im, info + + # -- + + class Writer(PillowFormat.Writer): + def _open(self, compression=None, quantize=None, interlaced=False, **kwargs): + # Better default for compression + kwargs["compress_level"] = kwargs.get("compress_level", 9) + + if compression is not None: + if compression < 0 or compression > 9: + raise ValueError("Invalid PNG compression level: %r" % compression) + kwargs["compress_level"] = compression + if quantize is not None: + for bits in range(1, 9): + if 2**bits == quantize: + break + else: + raise ValueError( + "PNG quantize must be power of two, " "not %r" % quantize + ) + kwargs["bits"] = bits + if interlaced: + logger.warning("PIL PNG writer cannot produce interlaced images.") + + ok_keys = ( + "optimize", + "transparency", + "dpi", + "pnginfo", + "bits", + "compress_level", + "icc_profile", + "dictionary", + "prefer_uint8", + ) + for key in kwargs: + if key not in ok_keys: + raise TypeError("Invalid arg for PNG writer: %r" % key) + + PillowFormat.Writer._open(self) + self._meta.update(kwargs) + + def _append_data(self, im, meta): + if str(im.dtype) == "uint16" and (im.ndim == 2 or im.shape[-1] == 1): + im = image_as_uint(im, bitdepth=16) + else: + im = image_as_uint(im, bitdepth=8) + PillowFormat.Writer._append_data(self, im, meta) + + +class JPEGFormat(PillowFormat): + """See :mod:`imageio.plugins.pillow_legacy`""" + + class Reader(PillowFormat.Reader): + def _open(self, pilmode=None, as_gray=False, exifrotate=True): + return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray) + + def _get_file(self): + # Pillow uses seek for JPG, so we cannot directly stream from web + if self.request.filename.startswith( + ("http://", "https://") + ) or ".zip/" in self.request.filename.replace("\\", "/"): 
+ self._we_own_fp = True + return open(self.request.get_local_filename(), "rb") + else: + self._we_own_fp = False + return self.request.get_file() + + def _get_data(self, index): + im, info = PillowFormat.Reader._get_data(self, index) + + # Handle exif + if "exif" in info: + from PIL.ExifTags import TAGS + + info["EXIF_MAIN"] = {} + for tag, value in self._im._getexif().items(): + decoded = TAGS.get(tag, tag) + info["EXIF_MAIN"][decoded] = value + + im = self._rotate(im, info) + return im, info + + def _rotate(self, im, meta): + """Use Orientation information from EXIF meta data to + orient the image correctly. Similar code as in FreeImage plugin. + """ + if self.request.kwargs.get("exifrotate", True): + try: + ori = meta["EXIF_MAIN"]["Orientation"] + except KeyError: # pragma: no cover + pass # Orientation not available + else: # pragma: no cover - we cannot touch all cases + # www.impulseadventure.com/photo/exif-orientation.html + if ori in [1, 2]: + pass + if ori in [3, 4]: + im = np.rot90(im, 2) + if ori in [5, 6]: + im = np.rot90(im, 3) + if ori in [7, 8]: + im = np.rot90(im) + if ori in [2, 4, 5, 7]: # Flipped cases (rare) + im = np.fliplr(im) + return im + + # -- + + class Writer(PillowFormat.Writer): + def _open(self, quality=75, progressive=False, optimize=False, **kwargs): + # The JPEG quality can be between 0 (worst) and 100 (best) + quality = int(quality) + if quality < 0 or quality > 100: + raise ValueError("JPEG quality should be between 0 and 100.") + + kwargs["quality"] = quality + kwargs["progressive"] = bool(progressive) + kwargs["optimize"] = bool(progressive) + + PillowFormat.Writer._open(self) + self._meta.update(kwargs) + + def _append_data(self, im, meta): + if im.ndim == 3 and im.shape[-1] == 4: + raise IOError("JPEG does not support alpha channel.") + im = image_as_uint(im, bitdepth=8) + PillowFormat.Writer._append_data(self, im, meta) + return + + +class JPEG2000Format(PillowFormat): + """See :mod:`imageio.plugins.pillow_legacy`""" + + 
class Reader(PillowFormat.Reader): + def _open(self, pilmode=None, as_gray=False): + return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray) + + def _get_file(self): + # Pillow uses seek for JPG, so we cannot directly stream from web + if self.request.filename.startswith( + ("http://", "https://") + ) or ".zip/" in self.request.filename.replace("\\", "/"): + self._we_own_fp = True + return open(self.request.get_local_filename(), "rb") + else: + self._we_own_fp = False + return self.request.get_file() + + def _get_data(self, index): + im, info = PillowFormat.Reader._get_data(self, index) + + # Handle exif + if "exif" in info: + from PIL.ExifTags import TAGS + + info["EXIF_MAIN"] = {} + for tag, value in self._im._getexif().items(): + decoded = TAGS.get(tag, tag) + info["EXIF_MAIN"][decoded] = value + + im = self._rotate(im, info) + return im, info + + def _rotate(self, im, meta): + """Use Orientation information from EXIF meta data to + orient the image correctly. Similar code as in FreeImage plugin. 
+ """ + if self.request.kwargs.get("exifrotate", True): + try: + ori = meta["EXIF_MAIN"]["Orientation"] + except KeyError: # pragma: no cover + pass # Orientation not available + else: # pragma: no cover - we cannot touch all cases + # www.impulseadventure.com/photo/exif-orientation.html + if ori in [1, 2]: + pass + if ori in [3, 4]: + im = np.rot90(im, 2) + if ori in [5, 6]: + im = np.rot90(im, 3) + if ori in [7, 8]: + im = np.rot90(im) + if ori in [2, 4, 5, 7]: # Flipped cases (rare) + im = np.fliplr(im) + return im + + # -- + + class Writer(PillowFormat.Writer): + def _open(self, quality_mode="rates", quality=5, **kwargs): + # Check quality - in Pillow it should be no higher than 95 + if quality_mode not in {"rates", "dB"}: + raise ValueError("Quality mode should be either 'rates' or 'dB'") + + quality = float(quality) + + if quality_mode == "rates" and (quality < 1 or quality > 1000): + raise ValueError( + "The quality value {} seems to be an invalid rate!".format(quality) + ) + elif quality_mode == "dB" and (quality < 15 or quality > 100): + raise ValueError( + "The quality value {} seems to be an invalid PSNR!".format(quality) + ) + + kwargs["quality_mode"] = quality_mode + kwargs["quality_layers"] = [quality] + + PillowFormat.Writer._open(self) + self._meta.update(kwargs) + + def _append_data(self, im, meta): + if im.ndim == 3 and im.shape[-1] == 4: + raise IOError( + "The current implementation of JPEG 2000 does not support alpha channel." + ) + im = image_as_uint(im, bitdepth=8) + PillowFormat.Writer._append_data(self, im, meta) + return + + +def save_pillow_close(im): + # see issue #216 and #300 + if hasattr(im, "close"): + if hasattr(getattr(im, "fp", None), "close"): + im.close() + + +# Func from skimage + +# This cells contains code from scikit-image, in particular from +# http://github.com/scikit-image/scikit-image/blob/master/ +# skimage/io/_plugins/pil_plugin.py +# The scikit-image license applies. 
+ + +def pil_try_read(im): + try: + # this will raise an IOError if the file is not readable + im.getdata()[0] + except IOError as e: + site = "http://pillow.readthedocs.io/en/latest/installation.html" + site += "#external-libraries" + pillow_error_message = str(e) + error_message = ( + 'Could not load "%s" \n' + 'Reason: "%s"\n' + "Please see documentation at: %s" + % (im.filename, pillow_error_message, site) + ) + raise ValueError(error_message) + + +def _palette_is_grayscale(pil_image): + if pil_image.mode != "P": + return False + elif pil_image.info.get("transparency", None): # see issue #475 + return False + # get palette as an array with R, G, B columns + # Note: starting in pillow 9.1 palettes may have less than 256 entries + palette = np.asarray(pil_image.getpalette()).reshape((-1, 3)) + # Not all palette colors are used; unused colors have junk values. + start, stop = pil_image.getextrema() + valid_palette = palette[start : stop + 1] + # Image is grayscale if channel differences (R - G and G - B) + # are all zero. + return np.allclose(np.diff(valid_palette), 0) + + +def pil_get_frame(im, is_gray=None, as_gray=None, mode=None, dtype=None): + """ + is_gray: Whether the image *is* gray (by inspecting its palette). + as_gray: Whether the resulting image must be converted to gaey. + mode: The mode to convert to. + """ + + if is_gray is None: + is_gray = _palette_is_grayscale(im) + + frame = im + + # Convert ... + if mode is not None: + # Mode is explicitly given ... + if mode != im.mode: + frame = im.convert(mode) + elif as_gray: + pass # don't do any auto-conversions (but do the explicit one above) + elif im.mode == "P" and is_gray: + # Paletted images that are already gray by their palette + # are converted so that the resulting numpy array is 2D. + frame = im.convert("L") + elif im.mode == "P": + # Paletted images are converted to RGB/RGBA. We jump some loops to make + # this work well. 
+ if im.info.get("transparency", None) is not None: + # Let Pillow apply the transparency, see issue #210 and #246 + frame = im.convert("RGBA") + elif im.palette.mode in ("RGB", "RGBA"): + # We can do this ourselves. Pillow seems to sometimes screw + # this up if a multi-gif has a palette for each frame ... + # Create palette array + p = np.frombuffer(im.palette.getdata()[1], np.uint8) + # Restore the raw mode that was saved to be used to parse the palette + if hasattr(im.palette, "rawmode_saved"): + im.palette.rawmode = im.palette.rawmode_saved + mode = im.palette.rawmode if im.palette.rawmode else im.palette.mode + nchannels = len(mode) + # Shape it. + p.shape = -1, nchannels + if p.shape[1] == 3 or (p.shape[1] == 4 and mode[-1] == "X"): + p = np.column_stack((p[:, :3], 255 * np.ones(p.shape[0], p.dtype))) + # Swap the axes if the mode is in BGR and not RGB + if mode.startswith("BGR"): + p = p[:, [2, 1, 0]] if p.shape[1] == 3 else p[:, [2, 1, 0, 3]] + # Apply palette + frame_paletted = np.array(im, np.uint8) + try: + frame = p[frame_paletted] + except Exception: + # Ok, let PIL do it. The introduction of the branch that + # tests `im.info['transparency']` should make this happen + # much less often, but let's keep it, to be safe. + frame = im.convert("RGBA") + else: + # Let Pillow do it. Unlinke skimage, we always convert + # to RGBA; palettes can be RGBA. + if True: # im.format == 'PNG' and 'transparency' in im.info: + frame = im.convert("RGBA") + else: + frame = im.convert("RGB") + elif "A" in im.mode: + frame = im.convert("RGBA") + elif im.mode == "CMYK": + frame = im.convert("RGB") + elif im.format == "GIF" and im.mode == "RGB": + # pillow9 returns RGBA images for subsequent frames so that it can deal + # with multi-frame GIF that use frame-level palettes and don't dispose + # all areas. + + # For backwards compatibility, we promote everything to RGBA. 
+ frame = im.convert("RGBA") + + # Apply a post-convert if necessary + if as_gray: + frame = frame.convert("F") # Scipy compat + elif not isinstance(frame, np.ndarray) and frame.mode == "1": + # Workaround for crash in PIL. When im is 1-bit, the call array(im) + # can cause a segfault, or generate garbage. See + # https://github.com/scipy/scipy/issues/2138 and + # https://github.com/python-pillow/Pillow/issues/350. + # + # This converts im from a 1-bit image to an 8-bit image. + frame = frame.convert("L") + + # Convert to numpy array + if im.mode.startswith("I;16"): + # e.g. in16 PNG's + shape = im.size + dtype = ">u2" if im.mode.endswith("B") else "= 0: + arr = arr.astype(np.uint8) + mode = mode_base = "L" + + else: + arr = image_as_uint(arr, bitdepth=16) + + else: + arr = image_as_uint(arr, bitdepth=8) + mode = "L" + mode_base = "L" + + if mode == "I;16" and int(getattr(Image, "__version__", "0").split(".")[0]) < 6: + # Pillow < v6.0.0 has limited support for the "I;16" mode, + # requiring us to fall back to this expensive workaround. + # tobytes actually creates a copy of the image, which is costly. + array_buffer = arr.tobytes() + if arr.ndim == 2: + im = Image.new(mode_base, arr.T.shape) + im.frombytes(array_buffer, "raw", mode) + else: + image_shape = (arr.shape[1], arr.shape[0]) + im = Image.frombytes(mode, image_shape, array_buffer) + return im + else: + return Image.fromarray(arr, mode) + + +# imported for backwards compatibility +from .pillowmulti import GIFFormat, TIFFFormat # noqa: E402, F401 diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/pillowmulti.py b/parrot/lib/python3.10/site-packages/imageio/plugins/pillowmulti.py new file mode 100644 index 0000000000000000000000000000000000000000..e41c71073fd5fe2b301eac4031da22745e9cec99 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/pillowmulti.py @@ -0,0 +1,338 @@ +""" +PIL formats for multiple images. 
+""" + +import logging + +import numpy as np + +from .pillow_legacy import PillowFormat, image_as_uint, ndarray_to_pil + +logger = logging.getLogger(__name__) + +NeuQuant = None # we can implement this when we need it + + +class TIFFFormat(PillowFormat): + _modes = "i" # arg, why bother; people should use the tiffile version + _description = "TIFF format (Pillow)" + + +class GIFFormat(PillowFormat): + """See :mod:`imageio.plugins.pillow_legacy`""" + + _modes = "iI" + _description = "Static and animated gif (Pillow)" + + # GIF reader needs no modifications compared to base pillow reader + + class Writer(PillowFormat.Writer): # pragma: no cover + def _open( + self, + loop=0, + duration=None, + fps=10, + palettesize=256, + quantizer=0, + subrectangles=False, + ): + from PIL import __version__ as pillow_version + + major, minor, patch = tuple(int(x) for x in pillow_version.split(".")) + if major == 10 and minor >= 1: + raise ImportError( + f"Pillow v{pillow_version} is not supported by ImageIO's legacy " + "pillow plugin when writing GIFs. Consider switching to the new " + "plugin or downgrading to `pillow<10.1.0`." + ) + + # Check palettesize + palettesize = int(palettesize) + if palettesize < 2 or palettesize > 256: + raise ValueError("GIF quantize param must be 2..256") + if palettesize not in [2, 4, 8, 16, 32, 64, 128, 256]: + palettesize = 2 ** int(np.log2(128) + 0.999) + logger.warning( + "Warning: palettesize (%r) modified to a factor of " + "two between 2-256." 
% palettesize + ) + # Duratrion / fps + if duration is None: + self._duration = 1.0 / float(fps) + elif isinstance(duration, (list, tuple)): + self._duration = [float(d) for d in duration] + else: + self._duration = float(duration) + # loop + loop = float(loop) + if loop <= 0 or loop == float("inf"): + loop = 0 + loop = int(loop) + # Subrectangles / dispose + subrectangles = bool(subrectangles) + self._dispose = 1 if subrectangles else 2 + # The "0" (median cut) quantizer is by far the best + + fp = self.request.get_file() + self._writer = GifWriter( + fp, subrectangles, loop, quantizer, int(palettesize) + ) + + def _close(self): + self._writer.close() + + def _append_data(self, im, meta): + im = image_as_uint(im, bitdepth=8) + if im.ndim == 3 and im.shape[-1] == 1: + im = im[:, :, 0] + duration = self._duration + if isinstance(duration, list): + duration = duration[min(len(duration) - 1, self._writer._count)] + dispose = self._dispose + self._writer.add_image(im, duration, dispose) + + return + + +def intToBin(i): + return i.to_bytes(2, byteorder="little") + + +class GifWriter: # pragma: no cover + """Class that for helping write the animated GIF file. This is based on + code from images2gif.py (part of visvis). The version here is modified + to allow streamed writing. 
+ """ + + def __init__( + self, + file, + opt_subrectangle=True, + opt_loop=0, + opt_quantizer=0, + opt_palette_size=256, + ): + self.fp = file + + self.opt_subrectangle = opt_subrectangle + self.opt_loop = opt_loop + self.opt_quantizer = opt_quantizer + self.opt_palette_size = opt_palette_size + + self._previous_image = None # as np array + self._global_palette = None # as bytes + self._count = 0 + + from PIL.GifImagePlugin import getdata + + self.getdata = getdata + + def add_image(self, im, duration, dispose): + # Prepare image + im_rect, rect = im, (0, 0) + if self.opt_subrectangle: + im_rect, rect = self.getSubRectangle(im) + im_pil = self.converToPIL(im_rect, self.opt_quantizer, self.opt_palette_size) + + # Get pallette - apparently, this is the 3d element of the header + # (but it has not always been). Best we've got. Its not the same + # as im_pil.palette.tobytes(). + from PIL.GifImagePlugin import getheader + + palette = getheader(im_pil)[0][3] + + # Write image + if self._count == 0: + self.write_header(im_pil, palette, self.opt_loop) + self._global_palette = palette + self.write_image(im_pil, palette, rect, duration, dispose) + # assert len(palette) == len(self._global_palette) + + # Bookkeeping + self._previous_image = im + self._count += 1 + + def write_header(self, im, globalPalette, loop): + # Gather info + header = self.getheaderAnim(im) + appext = self.getAppExt(loop) + # Write + self.fp.write(header) + self.fp.write(globalPalette) + self.fp.write(appext) + + def close(self): + self.fp.write(";".encode("utf-8")) # end gif + + def write_image(self, im, palette, rect, duration, dispose): + fp = self.fp + + # Gather local image header and data, using PIL's getdata. That + # function returns a list of bytes objects, but which parts are + # what has changed multiple times, so we put together the first + # parts until we have enough to form the image header. 
+ data = self.getdata(im) + imdes = b"" + while data and len(imdes) < 11: + imdes += data.pop(0) + assert len(imdes) == 11 + + # Make image descriptor suitable for using 256 local color palette + lid = self.getImageDescriptor(im, rect) + graphext = self.getGraphicsControlExt(duration, dispose) + + # Write local header + if (palette != self._global_palette) or (dispose != 2): + # Use local color palette + fp.write(graphext) + fp.write(lid) # write suitable image descriptor + fp.write(palette) # write local color table + fp.write(b"\x08") # LZW minimum size code + else: + # Use global color palette + fp.write(graphext) + fp.write(imdes) # write suitable image descriptor + + # Write image data + for d in data: + fp.write(d) + + def getheaderAnim(self, im): + """Get animation header. To replace PILs getheader()[0]""" + bb = b"GIF89a" + bb += intToBin(im.size[0]) + bb += intToBin(im.size[1]) + bb += b"\x87\x00\x00" + return bb + + def getImageDescriptor(self, im, xy=None): + """Used for the local color table properties per image. + Otherwise global color table applies to all frames irrespective of + whether additional colors comes in play that require a redefined + palette. Still a maximum of 256 color per frame, obviously. + + Written by Ant1 on 2010-08-22 + Modified by Alex Robinson in Janurari 2011 to implement subrectangles. + """ + + # Defaule use full image and place at upper left + if xy is None: + xy = (0, 0) + + # Image separator, + bb = b"\x2C" + + # Image position and size + bb += intToBin(xy[0]) # Left position + bb += intToBin(xy[1]) # Top position + bb += intToBin(im.size[0]) # image width + bb += intToBin(im.size[1]) # image height + + # packed field: local color table flag1, interlace0, sorted table0, + # reserved00, lct size111=7=2^(7 + 1)=256. + bb += b"\x87" + + # LZW minimum size code now comes later, begining of [imagedata] blocks + return bb + + def getAppExt(self, loop): + """Application extension. This part specifies the amount of loops. 
+ If loop is 0 or inf, it goes on infinitely. + """ + if loop == 1: + return b"" + if loop == 0: + loop = 2**16 - 1 + bb = b"" + if loop != 0: # omit the extension if we would like a nonlooping gif + bb = b"\x21\xFF\x0B" # application extension + bb += b"NETSCAPE2.0" + bb += b"\x03\x01" + bb += intToBin(loop) + bb += b"\x00" # end + return bb + + def getGraphicsControlExt(self, duration=0.1, dispose=2): + """Graphics Control Extension. A sort of header at the start of + each image. Specifies duration and transparancy. + + Dispose + ------- + * 0 - No disposal specified. + * 1 - Do not dispose. The graphic is to be left in place. + * 2 - Restore to background color. The area used by the graphic + must be restored to the background color. + * 3 - Restore to previous. The decoder is required to restore the + area overwritten by the graphic with what was there prior to + rendering the graphic. + * 4-7 -To be defined. + """ + + bb = b"\x21\xF9\x04" + bb += chr((dispose & 3) << 2).encode("utf-8") + # low bit 1 == transparency, + # 2nd bit 1 == user input , next 3 bits, the low two of which are used, + # are dispose. + bb += intToBin(int(duration * 100 + 0.5)) # in 100th of seconds + bb += b"\x00" # no transparant color + bb += b"\x00" # end + return bb + + def getSubRectangle(self, im): + """Calculate the minimal rectangle that need updating. Returns + a two-element tuple containing the cropped image and an x-y tuple. + + Calculating the subrectangles takes extra time, obviously. However, + if the image sizes were reduced, the actual writing of the GIF + goes faster. In some cases applying this method produces a GIF faster. 
+ """ + + # Cannot do subrectangle for first image + if self._count == 0: + return im, (0, 0) + + prev = self._previous_image + + # Get difference, sum over colors + diff = np.abs(im - prev) + if diff.ndim == 3: + diff = diff.sum(2) + # Get begin and end for both dimensions + X = np.argwhere(diff.sum(0)) + Y = np.argwhere(diff.sum(1)) + # Get rect coordinates + if X.size and Y.size: + x0, x1 = int(X[0]), int(X[-1] + 1) + y0, y1 = int(Y[0]), int(Y[-1] + 1) + else: # No change ... make it minimal + x0, x1 = 0, 2 + y0, y1 = 0, 2 + + return im[y0:y1, x0:x1], (x0, y0) + + def converToPIL(self, im, quantizer, palette_size=256): + """Convert image to Paletted PIL image. + + PIL used to not do a very good job at quantization, but I guess + this has improved a lot (at least in Pillow). I don't think we need + neuqant (and we can add it later if we really want). + """ + + im_pil = ndarray_to_pil(im, "gif") + + if quantizer in ("nq", "neuquant"): + # NeuQuant algorithm + nq_samplefac = 10 # 10 seems good in general + im_pil = im_pil.convert("RGBA") # NQ assumes RGBA + nqInstance = NeuQuant(im_pil, nq_samplefac) # Learn colors + im_pil = nqInstance.quantize(im_pil, colors=palette_size) + elif quantizer in (0, 1, 2): + # Adaptive PIL algorithm + if quantizer == 2: + im_pil = im_pil.convert("RGBA") + else: + im_pil = im_pil.convert("RGB") + im_pil = im_pil.quantize(colors=palette_size, method=quantizer) + else: + raise ValueError("Invalid value for quantizer: %r" % quantizer) + return im_pil diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/pyav.py b/parrot/lib/python3.10/site-packages/imageio/plugins/pyav.py new file mode 100644 index 0000000000000000000000000000000000000000..79a855fd60fc714b1fb4b142ff7849e02ab12679 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/pyav.py @@ -0,0 +1,1198 @@ +"""Read/Write Videos (and images) using PyAV. + +.. 
note:: + To use this plugin you need to have `PyAV `_ + installed:: + + pip install av + +This plugin wraps pyAV, a pythonic binding for the FFMPEG library. It is similar +to our FFMPEG plugin, has improved performance, features a robust interface, and +aims to supersede the FFMPEG plugin in the future. + + +Methods +------- +.. note:: + Check the respective function for a list of supported kwargs and detailed + documentation. + +.. autosummary:: + :toctree: + + PyAVPlugin.read + PyAVPlugin.iter + PyAVPlugin.write + PyAVPlugin.properties + PyAVPlugin.metadata + +Additional methods available inside the :func:`imopen ` +context: + +.. autosummary:: + :toctree: + + PyAVPlugin.init_video_stream + PyAVPlugin.write_frame + PyAVPlugin.set_video_filter + PyAVPlugin.container_metadata + PyAVPlugin.video_stream_metadata + +Advanced API +------------ + +In addition to the default ImageIO v3 API this plugin exposes custom functions +that are specific to reading/writing video and its metadata. These are available +inside the :func:`imopen ` context and allow fine-grained +control over how the video is processed. The functions are documented above and +below you can find a usage example:: + + import imageio.v3 as iio + + with iio.imopen("test.mp4", "w", plugin="pyav") as file: + file.init_video_stream("libx264") + file.container_metadata["comment"] = "This video was created using ImageIO." + + for _ in range(5): + for frame in iio.imiter("imageio:newtonscradle.gif"): + file.write_frame(frame) + + meta = iio.immeta("test.mp4", plugin="pyav") + assert meta["comment"] == "This video was created using ImageIO." + + + +Pixel Formats (Colorspaces) +--------------------------- + +By default, this plugin converts the video into 8-bit RGB (called ``rgb24`` in +ffmpeg). This is a useful behavior for many use-cases, but sometimes you may +want to use the video's native colorspace or you may wish to convert the video +into an entirely different colorspace. 
This is controlled using the ``format`` +kwarg. You can use ``format=None`` to leave the image in its native colorspace +or specify any colorspace supported by FFMPEG as long as it is stridable, i.e., +as long as it can be represented by a single numpy array. Some useful choices +include: + +- rgb24 (default; 8-bit RGB) +- rgb48le (16-bit lower-endian RGB) +- bgr24 (8-bit BGR; openCVs default colorspace) +- gray (8-bit grayscale) +- yuv444p (8-bit channel-first YUV) + +Further, FFMPEG maintains a list of available formats, albeit not as part of the +narrative docs. It can be `found here +`_ (warning: C source +code). + +Filters +------- + +On top of providing basic read/write functionality, this plugin allows you to +use the full collection of `video filters available in FFMPEG +`_. This means that you +can apply excessive preprocessing to your video before retrieving it as a numpy +array or apply excessive post-processing before you encode your data. + +Filters come in two forms: sequences or graphs. Filter sequences are, as the +name suggests, sequences of filters that are applied one after the other. They +are specified using the ``filter_sequence`` kwarg. Filter graphs, on the other +hand, come in the form of a directed graph and are specified using the +``filter_graph`` kwarg. + +.. note:: + All filters are either sequences or graphs. If all you want is to apply a + single filter, you can do this by specifying a filter sequence with a single + entry. + +A ``filter_sequence`` is a list of filters, each defined through a 2-element +tuple of the form ``(filter_name, filter_parameters)``. The first element of the +tuple is the name of the filter. The second element are the filter parameters, +which can be given either as a string or a dict. The string matches the same +format that you would use when specifying the filter using the ffmpeg +command-line tool and the dict has entries of the form ``parameter:value``. 
For +example:: + + import imageio.v3 as iio + + # using a filter_parameters str + img1 = iio.imread( + "imageio:cockatoo.mp4", + plugin="pyav", + filter_sequence=[ + ("rotate", "45*PI/180") + ] + ) + + # using a filter_parameters dict + img2 = iio.imread( + "imageio:cockatoo.mp4", + plugin="pyav", + filter_sequence=[ + ("rotate", {"angle":"45*PI/180", "fillcolor":"AliceBlue"}) + ] + ) + +A ``filter_graph``, on the other hand, is specified using a ``(nodes, edges)`` +tuple. It is best explained using an example:: + + img = iio.imread( + "imageio:cockatoo.mp4", + plugin="pyav", + filter_graph=( + { + "split": ("split", ""), + "scale_overlay":("scale", "512:-1"), + "overlay":("overlay", "x=25:y=25:enable='between(t,1,8)'"), + }, + [ + ("video_in", "split", 0, 0), + ("split", "overlay", 0, 0), + ("split", "scale_overlay", 1, 0), + ("scale_overlay", "overlay", 0, 1), + ("overlay", "video_out", 0, 0), + ] + ) + ) + +The above transforms the video to have picture-in-picture of itself in the top +left corner. As you can see, nodes are specified using a dict which has names as +its keys and filter tuples as values; the same tuples as the ones used when +defining a filter sequence. Edges are a list of a 4-tuples of the form +``(node_out, node_in, output_idx, input_idx)`` and specify which two filters are +connected and which inputs/outputs should be used for this. + +Further, there are two special nodes in a filter graph: ``video_in`` and +``video_out``, which represent the graph's input and output respectively. These +names can not be chosen for other nodes (those nodes would simply be +overwritten), and for a graph to be valid there must be a path from the input to +the output and all nodes in the graph must be connected. + +While most graphs are quite simple, they can become very complex and we +recommend that you read through the `FFMPEG documentation +`_ and their +examples to better understand how to use them. 
+ +""" + +from fractions import Fraction +from math import ceil +from typing import Any, Dict, List, Optional, Tuple, Union, Generator + +import av +import av.filter +import numpy as np +from numpy.lib.stride_tricks import as_strided + +from ..core import Request +from ..core.request import URI_BYTES, InitializationError, IOMode +from ..core.v3_plugin_api import ImageProperties, PluginV3 + + +def _format_to_dtype(format: av.VideoFormat) -> np.dtype: + """Convert a pyAV video format into a numpy dtype""" + + if len(format.components) == 0: + # fake format + raise ValueError( + f"Can't determine dtype from format `{format.name}`. It has no channels." + ) + + endian = ">" if format.is_big_endian else "<" + dtype = "f" if "f32" in format.name else "u" + bits_per_channel = [x.bits for x in format.components] + n_bytes = str(int(ceil(bits_per_channel[0] / 8))) + + return np.dtype(endian + dtype + n_bytes) + + +def _get_frame_shape(frame: av.VideoFrame) -> Tuple[int, ...]: + """Compute the frame's array shape + + Parameters + ---------- + frame : av.VideoFrame + A frame for which the resulting shape should be computed. + + Returns + ------- + shape : Tuple[int, ...] + A tuple describing the shape of the image data in the frame. + + """ + + widths = [component.width for component in frame.format.components] + heights = [component.height for component in frame.format.components] + bits = np.array([component.bits for component in frame.format.components]) + line_sizes = [plane.line_size for plane in frame.planes] + + subsampled_width = widths[:-1] != widths[1:] + subsampled_height = heights[:-1] != heights[1:] + unaligned_components = np.any(bits % 8 != 0) or (line_sizes[:-1] != line_sizes[1:]) + if subsampled_width or subsampled_height or unaligned_components: + raise IOError( + f"{frame.format.name} can't be expressed as a strided array." + "Use `format=` to select a format to convert into." 
+ ) + + shape = [frame.height, frame.width] + + # ffmpeg doesn't have a notion of channel-first or channel-last formats + # instead it stores frames in one or more planes which contain individual + # components of a pixel depending on the pixel format. For channel-first + # formats each component lives on a separate plane (n_planes) and for + # channel-last formats all components are packed on a single plane + # (n_channels) + n_planes = max([component.plane for component in frame.format.components]) + 1 + if n_planes > 1: + shape = [n_planes] + shape + + channels_per_plane = [0] * n_planes + for component in frame.format.components: + channels_per_plane[component.plane] += 1 + n_channels = max(channels_per_plane) + + if n_channels > 1: + shape = shape + [n_channels] + + return tuple(shape) + + +class PyAVPlugin(PluginV3): + """Support for pyAV as backend. + + Parameters + ---------- + request : iio.Request + A request object that represents the users intent. It provides a + standard interface to access various the various ImageResources and + serves them to the plugin as a file object (or file). Check the docs for + details. + container : str + Only used during `iio_mode="w"`! If not None, overwrite the default container + format chosen by pyav. + kwargs : Any + Additional kwargs are forwarded to PyAV's constructor. + + """ + + def __init__(self, request: Request, *, container: str = None, **kwargs) -> None: + """Initialize a new Plugin Instance. + + See Plugin's docstring for detailed documentation. + + Notes + ----- + The implementation here stores the request as a local variable that is + exposed using a @property below. If you inherit from PluginV3, remember + to call ``super().__init__(request)``. 
+ + """ + + super().__init__(request) + + self._container = None + self._video_stream = None + self._video_filter = None + + if request.mode.io_mode == IOMode.read: + self._next_idx = 0 + try: + if request._uri_type == 5: # 5 is the value of URI_HTTP + # pyav should read from HTTP by itself. This enables reading + # HTTP-based streams like DASH. Note that solving streams + # like this is temporary until the new request object gets + # implemented. + self._container = av.open(request.raw_uri, **kwargs) + else: + self._container = av.open(request.get_file(), **kwargs) + self._video_stream = self._container.streams.video[0] + self._decoder = self._container.decode(video=0) + except av.AVError: + if isinstance(request.raw_uri, bytes): + msg = "PyAV does not support these ``" + else: + msg = f"PyAV does not support `{request.raw_uri}`" + raise InitializationError(msg) from None + else: + self.frames_written = 0 + file_handle = self.request.get_file() + filename = getattr(file_handle, "name", None) + extension = self.request.extension or self.request.format_hint + if extension is None: + raise InitializationError("Can't determine output container to use.") + + # hacky, but beats running our own format selection logic + # (since av_guess_format is not exposed) + try: + setattr(file_handle, "name", filename or "tmp" + extension) + except AttributeError: + pass # read-only, nothing we can do + + try: + self._container = av.open( + file_handle, mode="w", format=container, **kwargs + ) + except ValueError: + raise InitializationError( + f"PyAV can not write to `{self.request.raw_uri}`" + ) + + # --------------------- + # Standard V3 Interface + # --------------------- + + def read( + self, + *, + index: int = ..., + format: str = "rgb24", + filter_sequence: List[Tuple[str, Union[str, dict]]] = None, + filter_graph: Tuple[dict, List] = None, + constant_framerate: bool = None, + thread_count: int = 0, + thread_type: str = None, + ) -> np.ndarray: + """Read frames from the 
video. + + If ``index`` is an integer, this function reads the index-th frame from + the file. If ``index`` is ... (Ellipsis), this function reads all frames + from the video, stacks them along the first dimension, and returns a + batch of frames. + + Parameters + ---------- + index : int + The index of the frame to read, e.g. ``index=5`` reads the 5th + frame. If ``...``, read all the frames in the video and stack them + along a new, prepended, batch dimension. + format : str + Set the returned colorspace. If not None (default: rgb24), convert + the data into the given format before returning it. If ``None`` + return the data in the encoded format if it can be expressed as a + strided array; otherwise raise an Exception. + filter_sequence : List[str, str, dict] + If not None, apply the given sequence of FFmpeg filters to each + ndimage. Check the (module-level) plugin docs for details and + examples. + filter_graph : (dict, List) + If not None, apply the given graph of FFmpeg filters to each + ndimage. The graph is given as a tuple of two dicts. The first dict + contains a (named) set of nodes, and the second dict contains a set + of edges between nodes of the previous dict. Check the (module-level) + plugin docs for details and examples. + constant_framerate : bool + If True assume the video's framerate is constant. This allows for + faster seeking inside the file. If False, the video is reset before + each read and searched from the beginning. If None (default), this + value will be read from the container format. + thread_count : int + How many threads to use when decoding a frame. The default is 0, + which will set the number using ffmpeg's default, which is based on + the codec, number of available cores, threadding model, and other + considerations. + thread_type : str + The threading model to be used. 
One of + + - `"SLICE"`: threads assemble parts of the current frame + - `"FRAME"`: threads may assemble future frames + - None (default): Uses ``"FRAME"`` if ``index=...`` and ffmpeg's + default otherwise. + + + Returns + ------- + frame : np.ndarray + A numpy array containing loaded frame data. + + Notes + ----- + Accessing random frames repeatedly is costly (O(k), where k is the + average distance between two keyframes). You should do so only sparingly + if possible. In some cases, it can be faster to bulk-read the video (if + it fits into memory) and to then access the returned ndarray randomly. + + The current implementation may cause problems for b-frames, i.e., + bidirectionaly predicted pictures. I lack test videos to write unit + tests for this case. + + Reading from an index other than ``...``, i.e. reading a single frame, + currently doesn't support filters that introduce delays. + + """ + + if index is ...: + props = self.properties(format=format) + uses_filter = ( + self._video_filter is not None + or filter_graph is not None + or filter_sequence is not None + ) + + self._container.seek(0) + if not uses_filter and props.shape[0] != 0: + frames = np.empty(props.shape, dtype=props.dtype) + for idx, frame in enumerate( + self.iter( + format=format, + filter_sequence=filter_sequence, + filter_graph=filter_graph, + thread_count=thread_count, + thread_type=thread_type or "FRAME", + ) + ): + frames[idx] = frame + else: + frames = np.stack( + [ + x + for x in self.iter( + format=format, + filter_sequence=filter_sequence, + filter_graph=filter_graph, + thread_count=thread_count, + thread_type=thread_type or "FRAME", + ) + ] + ) + + # reset stream container, because threading model can't change after + # first access + self._video_stream.close() + self._video_stream = self._container.streams.video[0] + + return frames + + if thread_type is not None and thread_type != self._video_stream.thread_type: + self._video_stream.thread_type = thread_type + if ( + 
thread_count != 0 + and thread_count != self._video_stream.codec_context.thread_count + ): + # in FFMPEG thread_count == 0 means use the default count, which we + # change to mean don't change the thread count. + self._video_stream.codec_context.thread_count = thread_count + + if constant_framerate is None: + constant_framerate = not self._container.format.variable_fps + + # note: cheap for contigous incremental reads + self._seek(index, constant_framerate=constant_framerate) + desired_frame = next(self._decoder) + self._next_idx += 1 + + self.set_video_filter(filter_sequence, filter_graph) + if self._video_filter is not None: + desired_frame = self._video_filter.send(desired_frame) + + return self._unpack_frame(desired_frame, format=format) + + def iter( + self, + *, + format: str = "rgb24", + filter_sequence: List[Tuple[str, Union[str, dict]]] = None, + filter_graph: Tuple[dict, List] = None, + thread_count: int = 0, + thread_type: str = None, + ) -> np.ndarray: + """Yield frames from the video. + + Parameters + ---------- + frame : np.ndarray + A numpy array containing loaded frame data. + format : str + Convert the data into the given format before returning it. If None, + return the data in the encoded format if it can be expressed as a + strided array; otherwise raise an Exception. + filter_sequence : List[str, str, dict] + Set the returned colorspace. If not None (default: rgb24), convert + the data into the given format before returning it. If ``None`` + return the data in the encoded format if it can be expressed as a + strided array; otherwise raise an Exception. + filter_graph : (dict, List) + If not None, apply the given graph of FFmpeg filters to each + ndimage. The graph is given as a tuple of two dicts. The first dict + contains a (named) set of nodes, and the second dict contains a set + of edges between nodes of the previous dict. Check the (module-level) + plugin docs for details and examples. 
+ thread_count : int + How many threads to use when decoding a frame. The default is 0, + which will set the number using ffmpeg's default, which is based on + the codec, number of available cores, threadding model, and other + considerations. + thread_type : str + The threading model to be used. One of + + - `"SLICE"` (default): threads assemble parts of the current frame + - `"FRAME"`: threads may assemble future frames (faster for bulk reading) + + + Yields + ------ + frame : np.ndarray + A (decoded) video frame. + + + """ + + self._video_stream.thread_type = thread_type or "SLICE" + self._video_stream.codec_context.thread_count = thread_count + + self.set_video_filter(filter_sequence, filter_graph) + + for frame in self._decoder: + self._next_idx += 1 + + if self._video_filter is not None: + try: + frame = self._video_filter.send(frame) + except StopIteration: + break + + if frame is None: + continue + + yield self._unpack_frame(frame, format=format) + + if self._video_filter is not None: + for frame in self._video_filter: + yield self._unpack_frame(frame, format=format) + + def write( + self, + ndimage: Union[np.ndarray, List[np.ndarray]], + *, + codec: str = None, + is_batch: bool = True, + fps: int = 24, + in_pixel_format: str = "rgb24", + out_pixel_format: str = None, + filter_sequence: List[Tuple[str, Union[str, dict]]] = None, + filter_graph: Tuple[dict, List] = None, + ) -> Optional[bytes]: + """Save a ndimage as a video. + + Given a batch of frames (stacked along the first axis) or a list of + frames, encode them and add the result to the ImageResource. + + Parameters + ---------- + ndimage : ArrayLike, List[ArrayLike] + The ndimage to encode and write to the ImageResource. + codec : str + The codec to use when encoding frames. Only needed on first write + and ignored on subsequent writes. + is_batch : bool + If True (default), the ndimage is a batch of images, otherwise it is + a single image. This parameter has no effect on lists of ndimages. 
+ fps : str + The resulting videos frames per second. + in_pixel_format : str + The pixel format of the incoming ndarray. Defaults to "rgb24" and can + be any stridable pix_fmt supported by FFmpeg. + out_pixel_format : str + The pixel format to use while encoding frames. If None (default) + use the codec's default. + filter_sequence : List[str, str, dict] + If not None, apply the given sequence of FFmpeg filters to each + ndimage. Check the (module-level) plugin docs for details and + examples. + filter_graph : (dict, List) + If not None, apply the given graph of FFmpeg filters to each + ndimage. The graph is given as a tuple of two dicts. The first dict + contains a (named) set of nodes, and the second dict contains a set + of edges between nodes of the previous dict. Check the (module-level) + plugin docs for details and examples. + + Returns + ------- + encoded_image : bytes or None + If the chosen ImageResource is the special target ``""`` then + write will return a byte string containing the encoded image data. + Otherwise, it returns None. + + Notes + ----- + When writing ````, the video is finalized immediately after the + first write call and calling write multiple times to append frames is + not possible. + + """ + + if isinstance(ndimage, list): + # frames shapes must agree for video + if any(f.shape != ndimage[0].shape for f in ndimage): + raise ValueError("All frames should have the same shape") + elif not is_batch: + ndimage = np.asarray(ndimage)[None, ...] 
+ else: + ndimage = np.asarray(ndimage) + + if self._video_stream is None: + self.init_video_stream(codec, fps=fps, pixel_format=out_pixel_format) + + self.set_video_filter(filter_sequence, filter_graph) + + for img in ndimage: + self.write_frame(img, pixel_format=in_pixel_format) + + if self.request._uri_type == URI_BYTES: + # bytes are immutuable, so we have to flush immediately + # and can't support appending + self._flush_writer() + self._container.close() + + return self.request.get_file().getvalue() + + def properties(self, index: int = ..., *, format: str = "rgb24") -> ImageProperties: + """Standardized ndimage metadata. + + Parameters + ---------- + index : int + The index of the ndimage for which to return properties. If ``...`` + (Ellipsis, default), return the properties for the resulting batch + of frames. + format : str + If not None (default: rgb24), convert the data into the given format + before returning it. If None return the data in the encoded format + if that can be expressed as a strided array; otherwise raise an + Exception. + + Returns + ------- + properties : ImageProperties + A dataclass filled with standardized image metadata. + + Notes + ----- + This function is efficient and won't process any pixel data. + + The provided metadata does not include modifications by any filters + (through ``filter_sequence`` or ``filter_graph``). + + """ + + video_width = self._video_stream.codec_context.width + video_height = self._video_stream.codec_context.height + pix_format = format or self._video_stream.codec_context.pix_fmt + frame_template = av.VideoFrame(video_width, video_height, pix_format) + + shape = _get_frame_shape(frame_template) + if index is ...: + n_frames = self._video_stream.frames + shape = (n_frames,) + shape + + return ImageProperties( + shape=tuple(shape), + dtype=_format_to_dtype(frame_template.format), + n_images=shape[0] if index is ... 
else None, + is_batch=index is ..., + ) + + def metadata( + self, + index: int = ..., + exclude_applied: bool = True, + constant_framerate: bool = None, + ) -> Dict[str, Any]: + """Format-specific metadata. + + Returns a dictionary filled with metadata that is either stored in the + container, the video stream, or the frame's side-data. + + Parameters + ---------- + index : int + If ... (Ellipsis, default) return global metadata (the metadata + stored in the container and video stream). If not ..., return the + side data stored in the frame at the given index. + exclude_applied : bool + Currently, this parameter has no effect. It exists for compliance with + the ImageIO v3 API. + constant_framerate : bool + If True assume the video's framerate is constant. This allows for + faster seeking inside the file. If False, the video is reset before + each read and searched from the beginning. If None (default), this + value will be read from the container format. + + Returns + ------- + metadata : dict + A dictionary filled with format-specific metadata fields and their + values. 
+ + """ + + metadata = dict() + + if index is ...: + # useful flags defined on the container and/or video stream + metadata.update( + { + "video_format": self._video_stream.codec_context.pix_fmt, + "codec": self._video_stream.codec.name, + "long_codec": self._video_stream.codec.long_name, + "profile": self._video_stream.profile, + "fps": float(self._video_stream.guessed_rate), + } + ) + if self._video_stream.duration is not None: + duration = float( + self._video_stream.duration * self._video_stream.time_base + ) + metadata.update({"duration": duration}) + + metadata.update(self.container_metadata) + metadata.update(self.video_stream_metadata) + return metadata + + if constant_framerate is None: + constant_framerate = not self._container.format.variable_fps + + self._seek(index, constant_framerate=constant_framerate) + desired_frame = next(self._decoder) + self._next_idx += 1 + + # useful flags defined on the frame + metadata.update( + { + "key_frame": bool(desired_frame.key_frame), + "time": desired_frame.time, + "interlaced_frame": bool(desired_frame.interlaced_frame), + "frame_type": desired_frame.pict_type.name, + } + ) + + # side data + metadata.update( + {item.type.name: item.to_bytes() for item in desired_frame.side_data} + ) + + return metadata + + def close(self) -> None: + """Close the Video.""" + + is_write = self.request.mode.io_mode == IOMode.write + if is_write and self._video_stream is not None: + self._flush_writer() + + if self._video_stream is not None: + try: + self._video_stream.close() + except ValueError: + pass # stream already closed + + if self._container is not None: + self._container.close() + + self.request.finish() + + def __enter__(self) -> "PyAVPlugin": + return super().__enter__() + + # ------------------------------ + # Add-on Interface inside imopen + # ------------------------------ + + def init_video_stream( + self, + codec: str, + *, + fps: float = 24, + pixel_format: str = None, + max_keyframe_interval: int = None, + 
force_keyframes: bool = None, + ) -> None: + """Initialize a new video stream. + + This function adds a new video stream to the ImageResource using the + selected encoder (codec), framerate, and colorspace. + + Parameters + ---------- + codec : str + The codec to use, e.g. ``"libx264"`` or ``"vp9"``. + fps : float + The desired framerate of the video stream (frames per second). + pixel_format : str + The pixel format to use while encoding frames. If None (default) use + the codec's default. + max_keyframe_interval : int + The maximum distance between two intra frames (I-frames). Also known + as GOP size. If unspecified use the codec's default. Note that not + every I-frame is a keyframe; see the notes for details. + force_keyframes : bool + If True, limit inter frames dependency to frames within the current + keyframe interval (GOP), i.e., force every I-frame to be a keyframe. + If unspecified, use the codec's default. + + Notes + ----- + You can usually leave ``max_keyframe_interval`` and ``force_keyframes`` + at their default values, unless you try to generate seek-optimized video + or have a similar specialist use-case. In this case, ``force_keyframes`` + controls the ability to seek to _every_ I-frame, and + ``max_keyframe_interval`` controls how close to a random frame you can + seek. Low values allow more fine-grained seek at the expense of + file-size (and thus I/O performance). + + """ + + stream = self._container.add_stream(codec, fps) + stream.time_base = Fraction(1 / fps).limit_denominator(int(2**16 - 1)) + if pixel_format is not None: + stream.pix_fmt = pixel_format + if max_keyframe_interval is not None: + stream.gop_size = max_keyframe_interval + if force_keyframes is not None: + stream.closed_gop = force_keyframes + + self._video_stream = stream + + def write_frame(self, frame: np.ndarray, *, pixel_format: str = "rgb24") -> None: + """Add a frame to the video stream. + + This function appends a new frame to the video. 
It assumes that the + stream previously has been initialized. I.e., ``init_video_stream`` has + to be called before calling this function for the write to succeed. + + Parameters + ---------- + frame : np.ndarray + The image to be appended/written to the video stream. + pixel_format : str + The colorspace (pixel format) of the incoming frame. + + Notes + ----- + Frames may be held in a buffer, e.g., by the filter pipeline used during + writing or by FFMPEG to batch them prior to encoding. Make sure to + ``.close()`` the plugin or to use a context manager to ensure that all + frames are written to the ImageResource. + + """ + + # manual packing of ndarray into frame + # (this should live in pyAV, but it doesn't support all the formats we + # want and PRs there are slow) + pixel_format = av.VideoFormat(pixel_format) + img_dtype = _format_to_dtype(pixel_format) + width = frame.shape[2 if pixel_format.is_planar else 1] + height = frame.shape[1 if pixel_format.is_planar else 0] + av_frame = av.VideoFrame(width, height, pixel_format.name) + if pixel_format.is_planar: + for idx, plane in enumerate(av_frame.planes): + plane_array = np.frombuffer(plane, dtype=img_dtype) + plane_array = as_strided( + plane_array, + shape=(plane.height, plane.width), + strides=(plane.line_size, img_dtype.itemsize), + ) + plane_array[...] = frame[idx] + else: + if pixel_format.name.startswith("bayer_"): + # ffmpeg doesn't describe bayer formats correctly + # see https://github.com/imageio/imageio/issues/761#issuecomment-1059318851 + # and following for details. 
+ n_channels = 1 + else: + n_channels = len(pixel_format.components) + + plane = av_frame.planes[0] + plane_shape = (plane.height, plane.width) + plane_strides = (plane.line_size, n_channels * img_dtype.itemsize) + if n_channels > 1: + plane_shape += (n_channels,) + plane_strides += (img_dtype.itemsize,) + + plane_array = as_strided( + np.frombuffer(plane, dtype=img_dtype), + shape=plane_shape, + strides=plane_strides, + ) + plane_array[...] = frame + + stream = self._video_stream + av_frame.time_base = stream.codec_context.time_base + av_frame.pts = self.frames_written + self.frames_written += 1 + + if self._video_filter is not None: + av_frame = self._video_filter.send(av_frame) + if av_frame is None: + return + + if stream.frames == 0: + stream.width = av_frame.width + stream.height = av_frame.height + + for packet in stream.encode(av_frame): + self._container.mux(packet) + + def set_video_filter( + self, + filter_sequence: List[Tuple[str, Union[str, dict]]] = None, + filter_graph: Tuple[dict, List] = None, + ) -> None: + """Set the filter(s) to use. + + This function creates a new FFMPEG filter graph to use when reading or + writing video. In the case of reading, frames are passed through the + filter graph before begin returned and, in case of writing, frames are + passed through the filter before being written to the video. + + Parameters + ---------- + filter_sequence : List[str, str, dict] + If not None, apply the given sequence of FFmpeg filters to each + ndimage. Check the (module-level) plugin docs for details and + examples. + filter_graph : (dict, List) + If not None, apply the given graph of FFmpeg filters to each + ndimage. The graph is given as a tuple of two dicts. The first dict + contains a (named) set of nodes, and the second dict contains a set + of edges between nodes of the previous dict. Check the + (module-level) plugin docs for details and examples. 
+ + Notes + ----- + Changing a filter graph with lag during reading or writing will + currently cause frames in the filter queue to be lost. + + """ + + if filter_sequence is None and filter_graph is None: + self._video_filter = None + return + + if filter_sequence is None: + filter_sequence = list() + + node_descriptors: Dict[str, Tuple[str, Union[str, Dict]]] + edges: List[Tuple[str, str, int, int]] + if filter_graph is None: + node_descriptors, edges = dict(), [("video_in", "video_out", 0, 0)] + else: + node_descriptors, edges = filter_graph + + graph = av.filter.Graph() + + previous_node = graph.add_buffer(template=self._video_stream) + for filter_name, argument in filter_sequence: + if isinstance(argument, str): + current_node = graph.add(filter_name, argument) + else: + current_node = graph.add(filter_name, **argument) + previous_node.link_to(current_node) + previous_node = current_node + + nodes = dict() + nodes["video_in"] = previous_node + nodes["video_out"] = graph.add("buffersink") + for name, (filter_name, arguments) in node_descriptors.items(): + if isinstance(arguments, str): + nodes[name] = graph.add(filter_name, arguments) + else: + nodes[name] = graph.add(filter_name, **arguments) + + for from_note, to_node, out_idx, in_idx in edges: + nodes[from_note].link_to(nodes[to_node], out_idx, in_idx) + + graph.configure() + + def video_filter(): + # this starts a co-routine + # send frames using graph.send() + frame = yield None + + # send and receive frames in "parallel" + while frame is not None: + graph.push(frame) + try: + frame = yield graph.pull() + except av.error.BlockingIOError: + # filter has lag and needs more frames + frame = yield None + except av.error.EOFError: + break + + try: + # send EOF in av>=9.0 + graph.push(None) + except ValueError: # pragma: no cover + # handle av<9.0 + pass + + # all frames have been sent, empty the filter + while True: + try: + yield graph.pull() + except av.error.EOFError: + break # EOF + except 
av.error.BlockingIOError: # pragma: no cover + # handle av<9.0 + break + + self._video_filter = video_filter() + self._video_filter.send(None) + + @property + def container_metadata(self): + """Container-specific metadata. + + A dictionary containing metadata stored at the container level. + + """ + return self._container.metadata + + @property + def video_stream_metadata(self): + """Stream-specific metadata. + + A dictionary containing metadata stored at the stream level. + + """ + return self._video_stream.metadata + + # ------------------------------- + # Internals and private functions + # ------------------------------- + + def _unpack_frame(self, frame: av.VideoFrame, *, format: str = None) -> np.ndarray: + """Convert a av.VideoFrame into a ndarray + + Parameters + ---------- + frame : av.VideoFrame + The frame to unpack. + format : str + If not None, convert the frame to the given format before unpacking. + + """ + + if format is not None: + frame = frame.reformat(format=format) + + dtype = _format_to_dtype(frame.format) + shape = _get_frame_shape(frame) + + planes = list() + for idx in range(len(frame.planes)): + n_channels = sum( + [ + x.bits // (dtype.itemsize * 8) + for x in frame.format.components + if x.plane == idx + ] + ) + av_plane = frame.planes[idx] + plane_shape = (av_plane.height, av_plane.width) + plane_strides = (av_plane.line_size, n_channels * dtype.itemsize) + if n_channels > 1: + plane_shape += (n_channels,) + plane_strides += (dtype.itemsize,) + + np_plane = as_strided( + np.frombuffer(av_plane, dtype=dtype), + shape=plane_shape, + strides=plane_strides, + ) + planes.append(np_plane) + + if len(planes) > 1: + # Note: the planes *should* exist inside a contigous memory block + # somewhere inside av.Frame however pyAV does not appear to expose this, + # so we are forced to copy the planes individually instead of wrapping + # them :( + out = np.concatenate(planes).reshape(shape) + else: + out = planes[0] + + return out + + def _seek(self, 
index, *, constant_framerate: bool = True) -> Generator: + """Seeks to the frame at the given index.""" + + if index == self._next_idx: + return # fast path :) + + # we must decode at least once before we seek otherwise the + # returned frames become corrupt. + if self._next_idx == 0: + next(self._decoder) + self._next_idx += 1 + + if index == self._next_idx: + return # fast path :) + + # remove this branch until I find a way to efficiently find the next + # keyframe. keeping this as a reminder + # if self._next_idx < index and index < self._next_keyframe_idx: + # frames_to_yield = index - self._next_idx + if not constant_framerate and index > self._next_idx: + frames_to_yield = index - self._next_idx + elif not constant_framerate: + # seek backwards and can't link idx and pts + self._container.seek(0) + self._decoder = self._container.decode(video=0) + self._next_idx = 0 + + frames_to_yield = index + else: + # we know that the time between consecutive frames is constant + # hence we can link index and pts + + # how many pts lie between two frames + sec_delta = 1 / self._video_stream.guessed_rate + pts_delta = sec_delta / self._video_stream.time_base + + index_pts = int(index * pts_delta) + + # this only seeks to the closed (preceeding) keyframe + self._container.seek(index_pts, stream=self._video_stream) + self._decoder = self._container.decode(video=0) + + # this may be made faster if we could get the keyframe's time without + # decoding it + keyframe = next(self._decoder) + keyframe_time = keyframe.pts * keyframe.time_base + keyframe_pts = int(keyframe_time / self._video_stream.time_base) + keyframe_index = keyframe_pts // pts_delta + + self._container.seek(index_pts, stream=self._video_stream) + self._next_idx = keyframe_index + + frames_to_yield = index - keyframe_index + + for _ in range(frames_to_yield): + next(self._decoder) + self._next_idx += 1 + + def _flush_writer(self): + """Flush the filter and encoder + + This will reset the filter to `None` and send 
EoF to the encoder, + i.e., after calling, no more frames may be written. + + """ + + stream = self._video_stream + + if self._video_filter is not None: + # flush encoder + for av_frame in self._video_filter: + if stream.frames == 0: + stream.width = av_frame.width + stream.height = av_frame.height + for packet in stream.encode(av_frame): + self._container.mux(packet) + self._video_filter = None + + # flush stream + for packet in stream.encode(): + self._container.mux(packet) + self._video_stream = None diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/rawpy.py b/parrot/lib/python3.10/site-packages/imageio/plugins/rawpy.py new file mode 100644 index 0000000000000000000000000000000000000000..bde908f20f0476f4aabffe504127a8e9acc52fbd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/rawpy.py @@ -0,0 +1,191 @@ +""" Read/Write images using rawpy. + +rawpy is an easy-to-use Python wrapper for the LibRaw library. +It also contains some extra functionality for finding and repairing hot/dead pixels. +""" + +from typing import Any, Dict, Iterator, List, Optional, Tuple, Union +import rawpy +import numpy as np + +from ..core.request import URI_BYTES, InitializationError, IOMode, Request +from ..core.v3_plugin_api import ImageProperties, PluginV3 +from ..typing import ArrayLike + + +class RawPyPlugin(PluginV3): + """A class representing the rawpy plugin. + + Methods + ------- + + .. autosummary:: + :toctree: _plugins/rawpy + + RawPyPlugin.read + """ + + def __init__(self, request: Request) -> None: + """Instantiates a new rawpy plugin object + + Parameters + ---------- + request: Request + A request object representing the resource to be operated on. 
+ """ + + super().__init__(request) + + self._image_file = None + + if request.mode.io_mode == IOMode.read: + try: + self._image_file = rawpy.imread(request.get_file()) + except ( + rawpy.NotSupportedError, + rawpy.LibRawFileUnsupportedError, + rawpy.LibRawIOError, + ): + if request._uri_type == URI_BYTES: + raise InitializationError( + "RawPy can not read the provided bytes." + ) from None + else: + raise InitializationError( + f"RawPy can not read {request.raw_uri}." + ) from None + elif request.mode.io_mode == IOMode.write: + raise InitializationError("RawPy does not support writing.") from None + + def close(self) -> None: + if self._image_file: + self._image_file.close() + + self._request.finish() + + def read(self, *, index: int = 0, **kwargs) -> np.ndarray: + """Read Raw Image. + + Returns + ------- + nd_image: ndarray + The image data + """ + + nd_image: np.ndarray + + try: + nd_image = self._image_file.postprocess(**kwargs) + except Exception: + pass + + if index is Ellipsis: + nd_image = nd_image[None, ...] + + return nd_image + + def write(self, ndimage: Union[ArrayLike, List[ArrayLike]]) -> Optional[bytes]: + """RawPy does not support writing.""" + raise NotImplementedError() + + def iter(self) -> Iterator[np.ndarray]: + """Load the image. + + Returns + ------- + nd_image: ndarray + The image data + """ + + try: + yield self.read() + except Exception: + pass + + def metadata( + self, index: int = None, exclude_applied: bool = True + ) -> Dict[str, Any]: + """Read ndimage metadata. + + Parameters + ---------- + exclude_applied : bool + If True, exclude metadata fields that are applied to the image while + reading. For example, if the binary data contains a rotation flag, + the image is rotated by default and the rotation flag is excluded + from the metadata to avoid confusion. + + Returns + ------- + metadata : dict + A dictionary of format-specific metadata. 
+ + """ + + metadata = {} + + image_size = self._image_file.sizes + + metadata["black_level_per_channel"] = self._image_file.black_level_per_channel + metadata["camera_white_level_per_channel"] = ( + self._image_file.camera_white_level_per_channel + ) + metadata["color_desc"] = self._image_file.color_desc + metadata["color_matrix"] = self._image_file.color_matrix + metadata["daylight_whitebalance"] = self._image_file.daylight_whitebalance + metadata["dtype"] = self._image_file.raw_image.dtype + metadata["flip"] = image_size.flip + metadata["num_colors"] = self._image_file.num_colors + metadata["tone_curve"] = self._image_file.tone_curve + metadata["width"] = image_size.width + metadata["height"] = image_size.height + metadata["raw_width"] = image_size.raw_width + metadata["raw_height"] = image_size.raw_height + metadata["raw_shape"] = self._image_file.raw_image.shape + metadata["iwidth"] = image_size.iwidth + metadata["iheight"] = image_size.iheight + metadata["pixel_aspect"] = image_size.pixel_aspect + metadata["white_level"] = self._image_file.white_level + + if exclude_applied: + metadata.pop("black_level_per_channel", None) + metadata.pop("camera_white_level_per_channel", None) + metadata.pop("color_desc", None) + metadata.pop("color_matrix", None) + metadata.pop("daylight_whitebalance", None) + metadata.pop("dtype", None) + metadata.pop("flip", None) + metadata.pop("num_colors", None) + metadata.pop("tone_curve", None) + metadata.pop("raw_width", None) + metadata.pop("raw_height", None) + metadata.pop("raw_shape", None) + metadata.pop("iwidth", None) + metadata.pop("iheight", None) + metadata.pop("white_level", None) + + return metadata + + def properties(self, index: int = None) -> ImageProperties: + """Standardized ndimage metadata + + Returns + ------- + properties : ImageProperties + A dataclass filled with standardized image metadata. + + Notes + ----- + This does not decode pixel data and is fast for large images. 
+ + """ + + ImageSize = self._image_file.sizes + + width: int = ImageSize.width + height: int = ImageSize.height + shape: Tuple[int, ...] = (height, width) + + dtype = self._image_file.raw_image.dtype + + return ImageProperties(shape=shape, dtype=dtype) diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/simpleitk.py b/parrot/lib/python3.10/site-packages/imageio/plugins/simpleitk.py new file mode 100644 index 0000000000000000000000000000000000000000..dfaa066c12c66f521b4d0afa2687bbb6d498c227 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/simpleitk.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Read/Write images using SimpleITK. + +Backend: `Insight Toolkit `_ + +.. note:: + To use this plugin you have to install its backend:: + + pip install imageio[itk] + +The ItkFormat uses the ITK or SimpleITK library to support a range of +ITK-related formats. It also supports a few common formats (e.g. PNG and JPEG). + +Parameters +---------- +None + +""" + +from ..core import Format, has_module + +_itk = None # Defer loading to load_lib() function. + + +def load_lib(): + global _itk, _read_function, _write_function + try: + import itk as _itk + + _read_function = _itk.imread + _write_function = _itk.imwrite + except ImportError: + try: + import SimpleITK as _itk + + _read_function = _itk.ReadImage + _write_function = _itk.WriteImage + except ImportError: + raise ImportError( + "itk could not be found. " + "Please try " + " python -m pip install itk " + "or " + " python -m pip install simpleitk " + "or refer to " + " https://itkpythonpackage.readthedocs.io/ " + "for further instructions." + ) + return _itk + + +# Split up in real ITK and all supported formats. 
+ITK_FORMATS = ( + ".gipl", + ".ipl", + ".mha", + ".mhd", + ".nhdr", + "nia", + "hdr", + ".nrrd", + ".nii", + ".nii.gz", + ".img", + ".img.gz", + ".vtk", + "hdf5", + "lsm", + "mnc", + "mnc2", + "mgh", + "mnc", + "pic", +) +ALL_FORMATS = ITK_FORMATS + ( + ".bmp", + ".jpeg", + ".jpg", + ".png", + ".tiff", + ".tif", + ".dicom", + ".dcm", + ".gdcm", +) + + +class ItkFormat(Format): + """See :mod:`imageio.plugins.simpleitk`""" + + def _can_read(self, request): + # If the request is a format that only this plugin can handle, + # we report that we can do it; a useful error will be raised + # when simpleitk is not installed. For the more common formats + # we only report that we can read if the library is installed. + if request.extension in ITK_FORMATS: + return True + if has_module("itk.ImageIOBase") or has_module("SimpleITK"): + return request.extension in ALL_FORMATS + + def _can_write(self, request): + if request.extension in ITK_FORMATS: + return True + if has_module("itk.ImageIOBase") or has_module("SimpleITK"): + return request.extension in ALL_FORMATS + + # -- reader + + class Reader(Format.Reader): + def _open(self, pixel_type=None, fallback_only=None, **kwargs): + if not _itk: + load_lib() + args = () + if pixel_type is not None: + args += (pixel_type,) + if fallback_only is not None: + args += (fallback_only,) + self._img = _read_function(self.request.get_local_filename(), *args) + + def _get_length(self): + return 1 + + def _close(self): + pass + + def _get_data(self, index): + # Get data + if index != 0: + error_msg = "Index out of range while reading from itk file" + raise IndexError(error_msg) + + # Return array and empty meta data + return _itk.GetArrayFromImage(self._img), {} + + def _get_meta_data(self, index): + error_msg = "The itk plugin does not support meta data, currently." 
+ raise RuntimeError(error_msg) + + # -- writer + class Writer(Format.Writer): + def _open(self): + if not _itk: + load_lib() + + def _close(self): + pass + + def _append_data(self, im, meta): + _itk_img = _itk.GetImageFromArray(im) + _write_function(_itk_img, self.request.get_local_filename()) + + def set_meta_data(self, meta): + error_msg = "The itk plugin does not support meta data, currently." + raise RuntimeError(error_msg) diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/spe.py b/parrot/lib/python3.10/site-packages/imageio/plugins/spe.py new file mode 100644 index 0000000000000000000000000000000000000000..f56dd52eba1c8c171d1e0fe35172509c315b8ebe --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/spe.py @@ -0,0 +1,955 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Read SPE files. + +This plugin supports reading files saved in the Princeton Instruments +SPE file format. + +Parameters +---------- +check_filesize : bool + The number of frames in the file is stored in the file header. However, + this number may be wrong for certain software. If this is `True` + (default), derive the number of frames also from the file size and + raise a warning if the two values do not match. +char_encoding : str + Deprecated. Exists for backwards compatibility; use ``char_encoding`` of + ``metadata`` instead. +sdt_meta : bool + Deprecated. Exists for backwards compatibility; use ``sdt_control`` of + ``metadata`` instead. + +Methods +------- +.. note:: + Check the respective function for a list of supported kwargs and detailed + documentation. + +.. 
autosummary:: + :toctree: + + SpePlugin.read + SpePlugin.iter + SpePlugin.properties + SpePlugin.metadata + +""" + +from datetime import datetime +import logging +import os +from typing import ( + Any, + Callable, + Dict, + Iterator, + List, + Mapping, + Optional, + Sequence, + Tuple, + Union, +) +import warnings + +import numpy as np + +from ..core.request import Request, IOMode, InitializationError +from ..core.v3_plugin_api import PluginV3, ImageProperties + + +logger = logging.getLogger(__name__) + + +class Spec: + """SPE file specification data + + Tuples of (offset, datatype, count), where offset is the offset in the SPE + file and datatype is the datatype as used in `numpy.fromfile`() + + `data_start` is the offset of actual image data. + + `dtypes` translates SPE datatypes (0...4) to numpy ones, e. g. dtypes[0] + is dtype(" Tuple[int, int]: + """Get the version of SDT-control metadata encoded in the comments + + Parameters + ---------- + comments + List of SPE file comments, typically ``metadata["comments"]``. + + Returns + ------- + Major and minor version. ``-1, -1`` if detection failed. + """ + if comments[4][70:76] != "COMVER": + return -1, -1 + try: + return int(comments[4][76:78]), int(comments[4][78:80]) + except ValueError: + return -1, -1 + + @staticmethod + def parse_comments( + comments: Sequence[str], version: Tuple[int, int] + ) -> Dict[str, Any]: + """Extract SDT-control metadata from comments + + Parameters + ---------- + comments + List of SPE file comments, typically ``metadata["comments"]``. + version + Major and minor version of SDT-control metadata format + + Returns + ------- + Dict of metadata + """ + sdt_md = {} + for minor in range(version[1] + 1): + # Metadata with same major version is backwards compatible. + # Fields are specified incrementally in `comment_fields`. + # E.g. if the file has version 5.01, `comment_fields[5, 0]` and + # `comment_fields[5, 1]` need to be decoded. 
+ try: + cmt = __class__.comment_fields[version[0], minor] + except KeyError: + continue + for name, spec in cmt.items(): + try: + v = spec.cvt(comments[spec.n][spec.slice]) + if spec.scale is not None: + v *= spec.scale + sdt_md[name] = v + except Exception as e: + warnings.warn( + f"Failed to decode SDT-control metadata field `{name}`: {e}" + ) + sdt_md[name] = None + if version not in __class__.comment_fields: + supported_ver = ", ".join( + map(lambda x: f"{x[0]}.{x[1]:02}", __class__.comment_fields) + ) + warnings.warn( + f"Unsupported SDT-control metadata version {version[0]}.{version[1]:02}. " + f"Only versions {supported_ver} are supported. " + "Some or all SDT-control metadata may be missing." + ) + comment = comments[0] + comments[2] + sdt_md["comment"] = comment.strip() + return sdt_md + + @staticmethod + def get_datetime(date: str, time: str) -> Union[datetime, None]: + """Turn date and time saved by SDT-control into proper datetime object + + Parameters + ---------- + date + SPE file date, typically ``metadata["date"]``. + time + SPE file date, typically ``metadata["time_local"]``. + + Returns + ------- + File's datetime if parsing was succsessful, else None. + """ + try: + month = __class__.months[date[2:5]] + return datetime( + int(date[5:9]), + month, + int(date[0:2]), + int(time[0:2]), + int(time[2:4]), + int(time[4:6]), + ) + except Exception as e: + logger.info(f"Failed to decode date from SDT-control metadata: {e}.") + + @staticmethod + def extract_metadata(meta: Mapping, char_encoding: str = "latin1"): + """Extract SDT-control metadata from SPE metadata + + SDT-control stores some metadata in comments and other fields. + Extract them and remove unused entries. + + Parameters + ---------- + meta + SPE file metadata. Modified in place. + char_encoding + Character encoding used to decode strings in the metadata. 
+ """ + comver = __class__.get_comment_version(meta["comments"]) + if any(c < 0 for c in comver): + # This file most likely was not created by SDT-control + logger.debug("SDT-control comments not found.") + return + + sdt_meta = __class__.parse_comments(meta["comments"], comver) + meta.pop("comments") + meta.update(sdt_meta) + + # Get date and time in a usable format + dt = __class__.get_datetime(meta["date"], meta["time_local"]) + if dt: + meta["datetime"] = dt + meta.pop("date") + meta.pop("time_local") + + sp4 = meta["spare_4"] + try: + meta["modulation_script"] = sp4.decode(char_encoding) + meta.pop("spare_4") + except UnicodeDecodeError: + warnings.warn( + "Failed to decode SDT-control laser " + "modulation script. Bad char_encoding?" + ) + + # Get rid of unused data + meta.pop("time_utc") + meta.pop("exposure_sec") + + +class SpePlugin(PluginV3): + def __init__( + self, + request: Request, + check_filesize: bool = True, + char_encoding: Optional[str] = None, + sdt_meta: Optional[bool] = None, + ) -> None: + """Instantiate a new SPE file plugin object + + Parameters + ---------- + request : Request + A request object representing the resource to be operated on. + check_filesize : bool + If True, compute the number of frames from the filesize, compare it + to the frame count in the file header, and raise a warning if the + counts don't match. (Certain software may create files with + char_encoding : str + Deprecated. Exists for backwards compatibility; use ``char_encoding`` of + ``metadata`` instead. + sdt_meta : bool + Deprecated. Exists for backwards compatibility; use ``sdt_control`` of + ``metadata`` instead. + + """ + + super().__init__(request) + if request.mode.io_mode == IOMode.write: + raise InitializationError("cannot write SPE files") + + if char_encoding is not None: + warnings.warn( + "Passing `char_encoding` to the constructor is deprecated. 
" + "Use `char_encoding` parameter of the `metadata()` method " + "instead.", + DeprecationWarning, + ) + self._char_encoding = char_encoding + if sdt_meta is not None: + warnings.warn( + "Passing `sdt_meta` to the constructor is deprecated. " + "Use `sdt_control` parameter of the `metadata()` method " + "instead.", + DeprecationWarning, + ) + self._sdt_meta = sdt_meta + + self._file = self.request.get_file() + + try: + # Spec.basic contains no string, no need to worry about character + # encoding. + info = self._parse_header(Spec.basic, "latin1") + self._file_header_ver = info["file_header_ver"] + self._dtype = Spec.dtypes[info["datatype"]] + self._shape = (info["ydim"], info["xdim"]) + self._len = info["NumFrames"] + + if check_filesize: + # Some software writes incorrect `NumFrames` metadata. + # To determine the number of frames, check the size of the data + # segment -- until the end of the file for SPE<3, until the + # xml footer for SPE>=3. + if info["file_header_ver"] >= 3: + data_end = info["xml_footer_offset"] + else: + self._file.seek(0, os.SEEK_END) + data_end = self._file.tell() + line = data_end - Spec.data_start + line //= self._shape[0] * self._shape[1] * self._dtype.itemsize + if line != self._len: + warnings.warn( + f"The file header of {self.request.filename} claims there are " + f"{self._len} frames, but there are actually {line} frames." + ) + self._len = min(line, self._len) + self._file.seek(Spec.data_start) + except Exception: + raise InitializationError("SPE plugin cannot read the provided file.") + + def read(self, *, index: int = ...) -> np.ndarray: + """Read a frame or all frames from the file + + Parameters + ---------- + index : int + Select the index-th frame from the file. If index is `...`, + select all frames and stack them along a new axis. + + Returns + ------- + A Numpy array of pixel values. 
+ + """ + + if index is Ellipsis: + read_offset = Spec.data_start + count = self._shape[0] * self._shape[1] * self._len + out_shape = (self._len, *self._shape) + elif index < 0: + raise IndexError(f"Index `{index}` is smaller than 0.") + elif index >= self._len: + raise IndexError( + f"Index `{index}` exceeds the number of frames stored in this file (`{self._len}`)." + ) + else: + read_offset = ( + Spec.data_start + + index * self._shape[0] * self._shape[1] * self._dtype.itemsize + ) + count = self._shape[0] * self._shape[1] + out_shape = self._shape + + self._file.seek(read_offset) + data = np.fromfile(self._file, dtype=self._dtype, count=count) + return data.reshape(out_shape) + + def iter(self) -> Iterator[np.ndarray]: + """Iterate over the frames in the file + + Yields + ------ + A Numpy array of pixel values. + """ + + return (self.read(index=i) for i in range(self._len)) + + def metadata( + self, + index: int = ..., + exclude_applied: bool = True, + char_encoding: str = "latin1", + sdt_control: bool = True, + ) -> Dict[str, Any]: + """SPE specific metadata. + + Parameters + ---------- + index : int + Ignored as SPE files only store global metadata. + exclude_applied : bool + Ignored. Exists for API compatibility. + char_encoding : str + The encoding to use when parsing strings. + sdt_control : bool + If `True`, decode special metadata written by the + SDT-control software if present. + + Returns + ------- + metadata : dict + Key-value pairs of metadata. + + Notes + ----- + SPE v3 stores metadata as XML, whereas SPE v2 uses a binary format. + + .. rubric:: Supported SPE v2 Metadata fields + + ROIs : list of dict + Regions of interest used for recording images. Each dict has the + "top_left" key containing x and y coordinates of the top left corner, + the "bottom_right" key with x and y coordinates of the bottom right + corner, and the "bin" key with number of binned pixels in x and y + directions. 
+ comments : list of str + The SPE format allows for 5 comment strings of 80 characters each. + controller_version : int + Hardware version + logic_output : int + Definition of output BNC + amp_hi_cap_low_noise : int + Amp switching mode + mode : int + Timing mode + exp_sec : float + Alternative exposure in seconds + date : str + Date string + detector_temp : float + Detector temperature + detector_type : int + CCD / diode array type + st_diode : int + Trigger diode + delay_time : float + Used with async mode + shutter_control : int + Normal, disabled open, or disabled closed + absorb_live : bool + on / off + absorb_mode : int + Reference strip or file + can_do_virtual_chip : bool + True or False whether chip can do virtual chip + threshold_min_live : bool + on / off + threshold_min_val : float + Threshold minimum value + threshold_max_live : bool + on / off + threshold_max_val : float + Threshold maximum value + time_local : str + Experiment local time + time_utc : str + Experiment UTC time + adc_offset : int + ADC offset + adc_rate : int + ADC rate + adc_type : int + ADC type + adc_resolution : int + ADC resolution + adc_bit_adjust : int + ADC bit adjust + gain : int + gain + sw_version : str + Version of software which created this file + spare_4 : bytes + Reserved space + readout_time : float + Experiment readout time + type : str + Controller type + clockspeed_us : float + Vertical clock speed in microseconds + readout_mode : ["full frame", "frame transfer", "kinetics", ""] + Readout mode. Empty string means that this was not set by the + Software. 
+ window_size : int + Window size for Kinetics mode + file_header_ver : float + File header version + chip_size : [int, int] + x and y dimensions of the camera chip + virt_chip_size : [int, int] + Virtual chip x and y dimensions + pre_pixels : [int, int] + Pre pixels in x and y dimensions + post_pixels : [int, int], + Post pixels in x and y dimensions + geometric : list of {"rotate", "reverse", "flip"} + Geometric operations + sdt_major_version : int + (only for files created by SDT-control) + Major version of SDT-control software + sdt_minor_version : int + (only for files created by SDT-control) + Minor version of SDT-control software + sdt_controller_name : str + (only for files created by SDT-control) + Controller name + exposure_time : float + (only for files created by SDT-control) + Exposure time in seconds + color_code : str + (only for files created by SDT-control) + Color channels used + detection_channels : int + (only for files created by SDT-control) + Number of channels + background_subtraction : bool + (only for files created by SDT-control) + Whether background subtraction war turned on + em_active : bool + (only for files created by SDT-control) + Whether EM was turned on + em_gain : int + (only for files created by SDT-control) + EM gain + modulation_active : bool + (only for files created by SDT-control) + Whether laser modulation (“attenuate”) was turned on + pixel_size : float + (only for files created by SDT-control) + Camera pixel size + sequence_type : str + (only for files created by SDT-control) + Type of sequnce (standard, TOCCSL, arbitrary, …) + grid : float + (only for files created by SDT-control) + Sequence time unit (“grid size”) in seconds + n_macro : int + (only for files created by SDT-control) + Number of macro loops + delay_macro : float + (only for files created by SDT-control) + Time between macro loops in seconds + n_mini : int + (only for files created by SDT-control) + Number of mini loops + delay_mini : float + (only for 
files created by SDT-control) + Time between mini loops in seconds + n_micro : int (only for files created by SDT-control) + Number of micro loops + delay_micro : float (only for files created by SDT-control) + Time between micro loops in seconds + n_subpics : int + (only for files created by SDT-control) + Number of sub-pictures + delay_shutter : float + (only for files created by SDT-control) + Camera shutter delay in seconds + delay_prebleach : float + (only for files created by SDT-control) + Pre-bleach delay in seconds + bleach_time : float + (only for files created by SDT-control) + Bleaching time in seconds + recovery_time : float + (only for files created by SDT-control) + Recovery time in seconds + comment : str + (only for files created by SDT-control) + User-entered comment. This replaces the "comments" field. + datetime : datetime.datetime + (only for files created by SDT-control) + Combines the "date" and "time_local" keys. The latter two plus + "time_utc" are removed. + modulation_script : str + (only for files created by SDT-control) + Laser modulation script. Replaces the "spare_4" key. + bleach_piezo_active : bool + (only for files created by SDT-control) + Whether piezo for bleaching was enabled + """ + + if self._file_header_ver < 3: + if self._char_encoding is not None: + char_encoding = self._char_encoding + if self._sdt_meta is not None: + sdt_control = self._sdt_meta + return self._metadata_pre_v3(char_encoding, sdt_control) + return self._metadata_post_v3() + + def _metadata_pre_v3(self, char_encoding: str, sdt_control: bool) -> Dict[str, Any]: + """Extract metadata from SPE v2 files + + Parameters + ---------- + char_encoding + String character encoding + sdt_control + If `True`, try to decode special metadata written by the + SDT-control software. + + Returns + ------- + dict mapping metadata names to values. 
+ + """ + + m = self._parse_header(Spec.metadata, char_encoding) + + nr = m.pop("NumROI", None) + nr = 1 if nr < 1 else nr + m["ROIs"] = roi_array_to_dict(m["ROIs"][:nr]) + + # chip sizes + m["chip_size"] = [m.pop(k, None) for k in ("xDimDet", "yDimDet")] + m["virt_chip_size"] = [m.pop(k, None) for k in ("VChipXdim", "VChipYdim")] + m["pre_pixels"] = [m.pop(k, None) for k in ("XPrePixels", "YPrePixels")] + m["post_pixels"] = [m.pop(k, None) for k in ("XPostPixels", "YPostPixels")] + + # convert comments from numpy.str_ to str + m["comments"] = [str(c) for c in m["comments"]] + + # geometric operations + g = [] + f = m.pop("geometric", 0) + if f & 1: + g.append("rotate") + if f & 2: + g.append("reverse") + if f & 4: + g.append("flip") + m["geometric"] = g + + # Make some additional information more human-readable + t = m["type"] + if 1 <= t <= len(Spec.controllers): + m["type"] = Spec.controllers[t - 1] + else: + m["type"] = None + r = m["readout_mode"] + if 1 <= r <= len(Spec.readout_modes): + m["readout_mode"] = Spec.readout_modes[r - 1] + else: + m["readout_mode"] = None + + # bools + for k in ( + "absorb_live", + "can_do_virtual_chip", + "threshold_min_live", + "threshold_max_live", + ): + m[k] = bool(m[k]) + + # Extract SDT-control metadata if desired + if sdt_control: + SDTControlSpec.extract_metadata(m, char_encoding) + + return m + + def _metadata_post_v3(self) -> Dict[str, Any]: + """Extract XML metadata from SPE v3 files + + Returns + ------- + dict with key `"__xml"`, whose value is the XML metadata + """ + + info = self._parse_header(Spec.basic, "latin1") + self._file.seek(info["xml_footer_offset"]) + xml = self._file.read() + return {"__xml": xml} + + def properties(self, index: int = ...) -> ImageProperties: + """Standardized ndimage metadata. + + Parameters + ---------- + index : int + If the index is an integer, select the index-th frame and return + its properties. 
If index is an Ellipsis (...), return the + properties of all frames in the file stacked along a new batch + dimension. + + Returns + ------- + properties : ImageProperties + A dataclass filled with standardized image metadata. + """ + + if index is Ellipsis: + return ImageProperties( + shape=(self._len, *self._shape), + dtype=self._dtype, + n_images=self._len, + is_batch=True, + ) + return ImageProperties(shape=self._shape, dtype=self._dtype, is_batch=False) + + def _parse_header( + self, spec: Mapping[str, Tuple], char_encoding: str + ) -> Dict[str, Any]: + """Get information from SPE file header + + Parameters + ---------- + spec + Maps header entry name to its location, data type description and + optionally number of entries. See :py:attr:`Spec.basic` and + :py:attr:`Spec.metadata`. + char_encoding + String character encoding + + Returns + ------- + Dict mapping header entry name to its value + """ + + ret = {} + # Decode each string from the numpy array read by np.fromfile + decode = np.vectorize(lambda x: x.decode(char_encoding)) + + for name, sp in spec.items(): + self._file.seek(sp[0]) + cnt = 1 if len(sp) < 3 else sp[2] + v = np.fromfile(self._file, dtype=sp[1], count=cnt) + if v.dtype.kind == "S" and name not in Spec.no_decode: + # Silently ignore string decoding failures + try: + v = decode(v) + except Exception: + warnings.warn( + f'Failed to decode "{name}" metadata ' + "string. Check `char_encoding` parameter." + ) + + try: + # For convenience, if the array contains only one single + # entry, return this entry itself. + v = v.item() + except ValueError: + v = np.squeeze(v) + ret[name] = v + return ret + + +def roi_array_to_dict(a: np.ndarray) -> List[Dict[str, List[int]]]: + """Convert the `ROIs` structured arrays to :py:class:`dict` + + Parameters + ---------- + a + Structured array containing ROI data + + Returns + ------- + One dict per ROI. 
Keys are "top_left", "bottom_right", and "bin", + values are tuples whose first element is the x axis value and the + second element is the y axis value. + """ + + dict_list = [] + a = a[["startx", "starty", "endx", "endy", "groupx", "groupy"]] + for sx, sy, ex, ey, gx, gy in a: + roi_dict = { + "top_left": [int(sx), int(sy)], + "bottom_right": [int(ex), int(ey)], + "bin": [int(gx), int(gy)], + } + dict_list.append(roi_dict) + return dict_list diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/swf.py b/parrot/lib/python3.10/site-packages/imageio/plugins/swf.py new file mode 100644 index 0000000000000000000000000000000000000000..9d507ddeeb8576de8fc3e11b5cdb865e448db17e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/swf.py @@ -0,0 +1,336 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Read/Write SWF files. + +Backend: internal + +Shockwave flash (SWF) is a media format designed for rich and +interactive animations. This plugin makes use of this format to +store a series of images in a lossless format with good compression +(zlib). The resulting images can be shown as an animation using +a flash player (such as the browser). + +SWF stores images in RGBA format. RGB or grayscale images are +automatically converted. SWF does not support meta data. + +Parameters for reading +---------------------- +loop : bool + If True, the video will rewind as soon as a frame is requested + beyond the last frame. Otherwise, IndexError is raised. Default False. + +Parameters for saving +--------------------- +fps : int + The speed to play the animation. Default 12. +loop : bool + If True, add a tag to the end of the file to play again from + the first frame. Most flash players will then play the movie + in a loop. Note that the imageio SWF Reader does not check this + tag. Default True. 
+html : bool + If the output is a file on the file system, write an html file + (in HTML5) that shows the animation. Default False. +compress : bool + Whether to compress the swf file. Default False. You probably don't + want to use this. This does not decrease the file size since + the images are already compressed. It will result in slower + read and write time. The only purpose of this feature is to + create compressed SWF files, so that we can test the + functionality to read them. + +""" + +import os +import zlib +import logging +from io import BytesIO + +import numpy as np + +from ..core import Format, read_n_bytes, image_as_uint + + +logger = logging.getLogger(__name__) + +_swf = None # lazily loaded in lib() + + +def load_lib(): + global _swf + from . import _swf + + return _swf + + +class SWFFormat(Format): + """See :mod:`imageio.plugins.swf`""" + + def _can_read(self, request): + tmp = request.firstbytes[0:3].decode("ascii", "ignore") + if tmp in ("FWS", "CWS"): + return True + + def _can_write(self, request): + if request.extension in self.extensions: + return True + + # -- reader + + class Reader(Format.Reader): + def _open(self, loop=False): + if not _swf: + load_lib() + + self._arg_loop = bool(loop) + + self._fp = self.request.get_file() + + # Check file ... + tmp = self.request.firstbytes[0:3].decode("ascii", "ignore") + if tmp == "FWS": + pass # OK + elif tmp == "CWS": + # Compressed, we need to decompress + bb = self._fp.read() + bb = bb[:8] + zlib.decompress(bb[8:]) + # Wrap up in a file object + self._fp = BytesIO(bb) + else: + raise IOError("This does not look like a valid SWF file") + + # Skip first bytes. This also tests support got seeking ... 
+ try: + self._fp.seek(8) + self._streaming_mode = False + except Exception: + self._streaming_mode = True + self._fp_read(8) + + # Skip header + # Note that the number of frames is there, which we could + # potentially use, but the number of frames does not necessarily + # correspond to the number of images. + nbits = _swf.bits2int(self._fp_read(1), 5) + nbits = 5 + nbits * 4 + Lrect = nbits / 8.0 + if Lrect % 1: + Lrect += 1 + Lrect = int(Lrect) + self._fp_read(Lrect + 3) + + # Now the rest is basically tags ... + self._imlocs = [] # tuple (loc, sze, T, L1) + if not self._streaming_mode: + # Collect locations of frame, while skipping through the data + # This does not read any of the tag *data*. + try: + while True: + isimage, sze, T, L1 = self._read_one_tag() + loc = self._fp.tell() + if isimage: + # Still need to check if the format is right + format = ord(self._fp_read(3)[2:]) + if format == 5: # RGB or RGBA lossless + self._imlocs.append((loc, sze, T, L1)) + self._fp.seek(loc + sze) # Skip over tag + except IndexError: + pass # done reading + + def _fp_read(self, n): + return read_n_bytes(self._fp, n) + + def _close(self): + pass + + def _get_length(self): + if self._streaming_mode: + return np.inf + else: + return len(self._imlocs) + + def _get_data(self, index): + # Check index + if index < 0: + raise IndexError("Index in swf file must be > 0") + if not self._streaming_mode: + if self._arg_loop and self._imlocs: + index = index % len(self._imlocs) + if index >= len(self._imlocs): + raise IndexError("Index out of bounds") + + if self._streaming_mode: + # Walk over tags until we find an image + while True: + isimage, sze, T, L1 = self._read_one_tag() + bb = self._fp_read(sze) # always read data + if isimage: + im = _swf.read_pixels(bb, 0, T, L1) # can be None + if im is not None: + return im, {} + + else: + # Go to corresponding location, read data, and convert to image + loc, sze, T, L1 = self._imlocs[index] + self._fp.seek(loc) + bb = self._fp_read(sze) + # 
Read_pixels should return ndarray
+ if self._arg_compress: + bb = self._fp.getvalue() + self._fp = self._fp_real + self._fp.write(bb[:8]) + self._fp.write(zlib.compress(bb[8:])) + sze = self._fp.tell() # renew sze value + # set size + self._fp.seek(4) + self._fp.write(_swf.int2uint32(sze)) + self._fp = None # Disable + + # Write html? + if self._arg_html and os.path.isfile(self.request.filename): + dirname, fname = os.path.split(self.request.filename) + filename = os.path.join(dirname, fname[:-4] + ".html") + w, h = self._framesize + html = HTML % (fname, w, h, fname) + with open(filename, "wb") as f: + f.write(html.encode("utf-8")) + + def _write_header(self, framesize, fps): + self._framesize = framesize + # Called as soon as we know framesize; when we get first frame + bb = b"" + bb += "FC"[self._arg_compress].encode("ascii") + bb += "WS".encode("ascii") # signature bytes + bb += _swf.int2uint8(8) # version + bb += "0000".encode("ascii") # FileLength (leave open for now) + bb += ( + _swf.Tag().make_rect_record(0, framesize[0], 0, framesize[1]).tobytes() + ) + bb += _swf.int2uint8(0) + _swf.int2uint8(fps) # FrameRate + self._location_to_save_nframes = len(bb) + bb += "00".encode("ascii") # nframes (leave open for now) + self._fp.write(bb) + + # Write some initial tags + taglist = _swf.FileAttributesTag(), _swf.SetBackgroundTag(0, 0, 0) + for tag in taglist: + self._fp.write(tag.get_tag()) + + def _complete(self): + # What if no images were saved? 
+ if not self._framecounter: + self._write_header((10, 10), self._arg_fps) + # Write stop tag if we do not loop + if not self._arg_loop: + self._fp.write(_swf.DoActionTag("stop").get_tag()) + # finish with end tag + self._fp.write("\x00\x00".encode("ascii")) + + def _append_data(self, im, meta): + # Correct shape and type + if im.ndim == 3 and im.shape[-1] == 1: + im = im[:, :, 0] + im = image_as_uint(im, bitdepth=8) + # Get frame size + wh = im.shape[1], im.shape[0] + # Write header on first frame + isfirstframe = False + if self._framecounter == 0: + isfirstframe = True + self._write_header(wh, self._arg_fps) + # Create tags + bm = _swf.BitmapTag(im) + sh = _swf.ShapeTag(bm.id, (0, 0), wh) + po = _swf.PlaceObjectTag(1, sh.id, move=(not isfirstframe)) + sf = _swf.ShowFrameTag() + # Write tags + for tag in [bm, sh, po, sf]: + self._fp.write(tag.get_tag()) + self._framecounter += 1 + + def set_meta_data(self, meta): + pass + + +HTML = """ + + + + Show Flash animation %s + + + + +""" diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/tifffile.py b/parrot/lib/python3.10/site-packages/imageio/plugins/tifffile.py new file mode 100644 index 0000000000000000000000000000000000000000..190cfe2656181352e0a5fc87eeec51f46d5901f3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/tifffile.py @@ -0,0 +1,561 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Read/Write TIFF files. + +Backend: internal + +Provides support for a wide range of Tiff images using the tifffile +backend. + +Parameters for reading +---------------------- +offset : int + Optional start position of embedded file. By default this is + the current file position. +size : int + Optional size of embedded file. By default this is the number + of bytes from the 'offset' to the end of the file. +multifile : bool + If True (default), series may include pages from multiple files. + Currently applies to OME-TIFF only. 
+multifile_close : bool + If True (default), keep the handles of other files in multifile + series closed. This is inefficient when few files refer to + many pages. If False, the C runtime may run out of resources. + +Parameters for saving +--------------------- +bigtiff : bool + If True, the BigTIFF format is used. +byteorder : {'<', '>'} + The endianness of the data in the file. + By default this is the system's native byte order. +software : str + Name of the software used to create the image. + Saved with the first page only. + +Metadata for reading +-------------------- +planar_configuration : {'contig', 'planar'} + Specifies if samples are stored contiguous or in separate planes. + By default this setting is inferred from the data shape. + 'contig': last dimension contains samples. + 'planar': third last dimension contains samples. +resolution_unit : int + The resolution unit stored in the TIFF tag. Usually 1 means no/unknown unit, + 2 means dpi (inch), 3 means dpc (centimeter). +resolution : (float, float, str) + A tuple formatted as (X_resolution, Y_resolution, unit). The unit is a + string representing one of the following units:: + + NONE # No unit or unit unknown + INCH # dpi + CENTIMETER # cpi + MILLIMETER + MICROMETER + +compression : int + Value indicating the compression algorithm used, e.g. 5 is LZW, + 7 is JPEG, 8 is deflate. + If 1, data are uncompressed. +predictor : int + Value 2 indicates horizontal differencing was used before compression, + while 3 indicates floating point horizontal differencing. + If 1, no prediction scheme was used before compression. +orientation : {'top_left', 'bottom_right', ...} + Oriented of image array. +is_rgb : bool + True if page contains a RGB image. +is_contig : bool + True if page contains a contiguous image. +is_tiled : bool + True if page contains tiled image. +is_palette : bool + True if page contains a palette-colored image and not OME or STK. 
+is_reduced : bool + True if page is a reduced image of another image. +is_shaped : bool + True if page contains shape in image_description tag. +is_fluoview : bool + True if page contains FluoView MM_STAMP tag. +is_nih : bool + True if page contains NIH image header. +is_micromanager : bool + True if page contains Micro-Manager metadata. +is_ome : bool + True if page contains OME-XML in image_description tag. +is_sgi : bool + True if page contains SGI image and tile depth tags. +is_mdgel : bool + True if page contains md_file_tag tag. +is_mediacy : bool + True if page contains Media Cybernetics Id tag. +is_stk : bool + True if page contains UIC2Tag tag. +is_lsm : bool + True if page contains LSM CZ_LSM_INFO tag. +description : str + Image description +description1 : str + Additional description +is_imagej : None or str + ImageJ metadata +software : str + Software used to create the TIFF file +datetime : datetime.datetime + Creation date and time + +Metadata for writing +-------------------- +photometric : {'minisblack', 'miniswhite', 'rgb'} + The color space of the image data. + By default this setting is inferred from the data shape. +planarconfig : {'contig', 'planar'} + Specifies if samples are stored contiguous or in separate planes. + By default this setting is inferred from the data shape. + 'contig': last dimension contains samples. + 'planar': third last dimension contains samples. +resolution : (float, float) or ((int, int), (int, int)) + X and Y resolution in dots per inch as float or rational numbers. +description : str + The subject of the image. Saved with the first page only. +compress : int + Values from 0 to 9 controlling the level of zlib (deflate) compression. + If 0, data are written uncompressed (default). +compression : str, (int, int) + Compression scheme used while writing the image. If omitted (default) the + image is not uncompressed. Compression cannot be used to write contiguous + series. 
Compressors may require certain data shapes, types or value ranges. + For example, JPEG compression requires grayscale or RGB(A), uint8 or 12-bit + uint16. JPEG compression is experimental. JPEG markers and TIFF tags may not + match. Only a limited set of compression schemes are implemented. 'ZLIB' is + short for ADOBE_DEFLATE. The value is written to the Compression tag. +compressionargs: + Extra arguments passed to compression codec, e.g., compression level. Refer + to the Imagecodecs implementation for supported arguments. +predictor : bool + If True, horizontal differencing is applied before compression. + Note that using an int literal 1 actually means no prediction scheme + will be used. +volume : bool + If True, volume data are stored in one tile (if applicable) using + the SGI image_depth and tile_depth tags. + Image width and depth must be multiple of 16. + Few software can read this format, e.g. MeVisLab. +writeshape : bool + If True, write the data shape to the image_description tag + if necessary and no other description is given. +extratags: sequence of tuples + Additional tags as [(code, dtype, count, value, writeonce)]. + + code : int + The TIFF tag Id. + dtype : str + Data type of items in 'value' in Python struct format. + One of B, s, H, I, 2I, b, h, i, f, d, Q, or q. + count : int + Number of data values. Not used for string values. + value : sequence + 'Count' values compatible with 'dtype'. + writeonce : bool + If True, the tag is written to the first page only. + +Notes +----- +Global metadata is stored with the first frame in a TIFF file. +Thus calling :py:meth:`Format.Writer.set_meta_data` after the first frame +was written has no effect. Also, global metadata is ignored if metadata is +provided via the `meta` argument of :py:meth:`Format.Writer.append_data`. + +If you have installed tifffile as a Python package, imageio will attempt +to use that as backend instead of the bundled backend. 
Doing so can +provide access to new performance improvements and bug fixes. + +""" + +import datetime + +from ..core import Format +from ..core.request import URI_BYTES, URI_FILE + +import numpy as np +import warnings + + +try: + import tifffile as _tifffile +except ImportError: + warnings.warn( + "ImageIO's vendored tifffile backend is deprecated and will be" + " removed in ImageIO v3. Install the tifffile directly:" + " `pip install imageio[tifffile]`", + DeprecationWarning, + ) + from . import _tifffile + + +TIFF_FORMATS = (".tif", ".tiff", ".stk", ".lsm") +WRITE_METADATA_KEYS = ( + "photometric", + "planarconfig", + "resolution", + "description", + "compress", + "compression", + "compressionargs", + "predictor", + "volume", + "writeshape", + "extratags", + "datetime", +) +READ_METADATA_KEYS = ( + "planar_configuration", + "is_fluoview", + "is_nih", + "is_contig", + "is_micromanager", + "is_ome", + "is_lsm", + "is_palette", + "is_reduced", + "is_rgb", + "is_sgi", + "is_shaped", + "is_stk", + "is_tiled", + "is_mdgel", + "resolution_unit", + "compression", + "predictor", + "is_mediacy", + "orientation", + "description", + "description1", + "is_imagej", + "software", +) + + +class TiffFormat(Format): + """Provides support for a wide range of Tiff images using the tifffile + backend. + + Images that contain multiple pages can be read using ``imageio.mimread()`` + to read the individual pages, or ``imageio.volread()`` to obtain a + single (higher dimensional) array. + + Note that global metadata is stored with the first frame in a TIFF file. + Thus calling :py:meth:`Format.Writer.set_meta_data` after the first frame + was written has no effect. Also, global metadata is ignored if metadata is + provided via the `meta` argument of :py:meth:`Format.Writer.append_data`. + + If you have installed tifffile as a Python package, imageio will attempt + to use that as backend instead of the bundled backend. 
Doing so can + provide access to new performance improvements and bug fixes. + + Parameters for reading + ---------------------- + offset : int + Optional start position of embedded file. By default this is + the current file position. + size : int + Optional size of embedded file. By default this is the number + of bytes from the 'offset' to the end of the file. + multifile : bool + If True (default), series may include pages from multiple files. + Currently applies to OME-TIFF only. + multifile_close : bool + If True (default), keep the handles of other files in multifile + series closed. This is inefficient when few files refer to + many pages. If False, the C runtime may run out of resources. + + Parameters for saving + --------------------- + bigtiff : bool + If True, the BigTIFF format is used. + byteorder : {'<', '>'} + The endianness of the data in the file. + By default this is the system's native byte order. + software : str + Name of the software used to create the image. + Saved with the first page only. + + Metadata for reading + -------------------- + planar_configuration : {'contig', 'planar'} + Specifies if samples are stored contiguous or in separate planes. + By default this setting is inferred from the data shape. + 'contig': last dimension contains samples. + 'planar': third last dimension contains samples. + resolution_unit : (float, float) or ((int, int), (int, int)) + X and Y resolution in dots per inch as float or rational numbers. + compression : int + Value indicating the compression algorithm used, e.g. 5 is LZW, + 7 is JPEG, 8 is deflate. + If 1, data are uncompressed. + predictor : int + Value 2 indicates horizontal differencing was used before compression, + while 3 indicates floating point horizontal differencing. + If 1, no prediction scheme was used before compression. + orientation : {'top_left', 'bottom_right', ...} + Oriented of image array. + is_rgb : bool + True if page contains a RGB image. 
+ is_contig : bool + True if page contains a contiguous image. + is_tiled : bool + True if page contains tiled image. + is_palette : bool + True if page contains a palette-colored image and not OME or STK. + is_reduced : bool + True if page is a reduced image of another image. + is_shaped : bool + True if page contains shape in image_description tag. + is_fluoview : bool + True if page contains FluoView MM_STAMP tag. + is_nih : bool + True if page contains NIH image header. + is_micromanager : bool + True if page contains Micro-Manager metadata. + is_ome : bool + True if page contains OME-XML in image_description tag. + is_sgi : bool + True if page contains SGI image and tile depth tags. + is_stk : bool + True if page contains UIC2Tag tag. + is_mdgel : bool + True if page contains md_file_tag tag. + is_mediacy : bool + True if page contains Media Cybernetics Id tag. + is_stk : bool + True if page contains UIC2Tag tag. + is_lsm : bool + True if page contains LSM CZ_LSM_INFO tag. + description : str + Image description + description1 : str + Additional description + is_imagej : None or str + ImageJ metadata + software : str + Software used to create the TIFF file + datetime : datetime.datetime + Creation date and time + + Metadata for writing + -------------------- + photometric : {'minisblack', 'miniswhite', 'rgb'} + The color space of the image data. + By default this setting is inferred from the data shape. + planarconfig : {'contig', 'planar'} + Specifies if samples are stored contiguous or in separate planes. + By default this setting is inferred from the data shape. + 'contig': last dimension contains samples. + 'planar': third last dimension contains samples. + resolution : (float, float) or ((int, int), (int, int)) + X and Y resolution in dots per inch as float or rational numbers. + description : str + The subject of the image. Saved with the first page only. + compress : int + Values from 0 to 9 controlling the level of zlib (deflate) compression. 
+ If 0, data are written uncompressed (default). + predictor : bool + If True, horizontal differencing is applied before compression. + Note that using an int literal 1 actually means no prediction scheme + will be used. + volume : bool + If True, volume data are stored in one tile (if applicable) using + the SGI image_depth and tile_depth tags. + Image width and depth must be multiple of 16. + Few software can read this format, e.g. MeVisLab. + writeshape : bool + If True, write the data shape to the image_description tag + if necessary and no other description is given. + extratags: sequence of tuples + Additional tags as [(code, dtype, count, value, writeonce)]. + + code : int + The TIFF tag Id. + dtype : str + Data type of items in 'value' in Python struct format. + One of B, s, H, I, 2I, b, h, i, f, d, Q, or q. + count : int + Number of data values. Not used for string values. + value : sequence + 'Count' values compatible with 'dtype'. + writeonce : bool + If True, the tag is written to the first page only. 
+ """ + + def _can_read(self, request): + try: + _tifffile.TiffFile(request.get_file(), **request.kwargs) + except ValueError: + # vendored backend raises value exception + return False + except _tifffile.TiffFileError: # pragma: no-cover + # current version raises custom exception + return False + finally: + request.get_file().seek(0) + + return True + + def _can_write(self, request): + if request._uri_type in [URI_FILE, URI_BYTES]: + pass # special URI + elif request.extension not in self.extensions: + return False + + try: + _tifffile.TiffWriter(request.get_file(), **request.kwargs) + except ValueError: + # vendored backend raises value exception + return False + except _tifffile.TiffFileError: # pragma: no-cover + # current version raises custom exception + return False + finally: + request.get_file().seek(0) + return True + + # -- reader + + class Reader(Format.Reader): + def _open(self, **kwargs): + # Allow loading from http; tifffile uses seek, so download first + if self.request.filename.startswith(("http://", "https://")): + self._f = f = open(self.request.get_local_filename(), "rb") + else: + self._f = None + f = self.request.get_file() + self._tf = _tifffile.TiffFile(f, **kwargs) + + def _close(self): + self._tf.close() + if self._f is not None: + self._f.close() + + def _get_length(self): + return len(self._tf.series) + + def _get_data(self, index): + if index < 0 or index >= self._get_length(): + raise IndexError("Index out of range while reading from tiff file") + + im = self._tf.asarray(series=index) + meta = self._get_meta_data(index) + + return im, meta + + def _get_meta_data(self, index): + meta = {} + page = self._tf.pages[index or 0] + for key in READ_METADATA_KEYS: + try: + meta[key] = getattr(page, key) + except Exception: + pass + + # tifffile <= 0.12.1 use datetime, newer use DateTime + for key in ("datetime", "DateTime"): + try: + meta["datetime"] = datetime.datetime.strptime( + page.tags[key].value, "%Y:%m:%d %H:%M:%S" + ) + break + except 
Exception: + pass + + if 296 in page.tags: + meta["resolution_unit"] = page.tags[296].value.value + + if 282 in page.tags and 283 in page.tags and 296 in page.tags: + resolution_x = page.tags[282].value + resolution_y = page.tags[283].value + if resolution_x[1] == 0 or resolution_y[1] == 0: + warnings.warn( + "Ignoring resolution metadata, " + "because at least one direction has a 0 denominator.", + RuntimeWarning, + ) + else: + meta["resolution"] = ( + resolution_x[0] / resolution_x[1], + resolution_y[0] / resolution_y[1], + page.tags[296].value.name, + ) + + return meta + + # -- writer + class Writer(Format.Writer): + def _open(self, bigtiff=None, byteorder=None, software=None): + try: + self._tf = _tifffile.TiffWriter( + self.request.get_file(), + bigtiff=bigtiff, + byteorder=byteorder, + software=software, + ) + self._software = None + except TypeError: + # In tifffile >= 0.15, the `software` arg is passed to + # TiffWriter.save + self._tf = _tifffile.TiffWriter( + self.request.get_file(), bigtiff=bigtiff, byteorder=byteorder + ) + self._software = software + + self._meta = {} + self._frames_written = 0 + + def _close(self): + self._tf.close() + + def _append_data(self, im, meta): + if meta is not None: + meta = self._sanitize_meta(meta) + else: + # Use global metadata for first frame + meta = self._meta if self._frames_written == 0 else {} + if self._software is not None and self._frames_written == 0: + meta["software"] = self._software + # No need to check self.request.mode; tifffile figures out whether + # this is a single page, or all page data at once. 
+ try: + # TiffWriter.save has been deprecated in version 2020.9.30 + write_meth = self._tf.write + except AttributeError: + write_meth = self._tf.save + write_meth(np.asanyarray(im), contiguous=False, **meta) + self._frames_written += 1 + + @staticmethod + def _sanitize_meta(meta): + ret = {} + for key, value in meta.items(): + if key in WRITE_METADATA_KEYS: + # Special case of previously read `predictor` int value + # 1(=NONE) translation to False expected by TiffWriter.save + if key == "predictor" and not isinstance(value, bool): + ret[key] = value > 1 + elif key == "compress" and value != 0: + warnings.warn( + "The use of `compress` is deprecated. Use `compression` and `compressionargs` instead.", + DeprecationWarning, + ) + + if _tifffile.__version__ < "2022": + ret["compression"] = (8, value) + else: + ret["compression"] = "zlib" + ret["compressionargs"] = {"level": value} + else: + ret[key] = value + return ret + + def set_meta_data(self, meta): + self._meta = self._sanitize_meta(meta) diff --git a/parrot/lib/python3.10/site-packages/imageio/plugins/tifffile_v3.py b/parrot/lib/python3.10/site-packages/imageio/plugins/tifffile_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..054eaf1a50ceae3c056b625682af4f68ba8d3f9d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/plugins/tifffile_v3.py @@ -0,0 +1,413 @@ +"""Read/Write TIFF files using tifffile. + +.. note:: + To use this plugin you need to have `tifffile + `_ installed:: + + pip install tifffile + +This plugin wraps tifffile, a powerful library to manipulate TIFF files. It +superseeds our previous tifffile plugin and aims to expose all the features of +tifffile. + +The plugin treats individual TIFF series as ndimages. A series is a sequence of +TIFF pages that, when combined describe a meaningful unit, e.g., a volumetric +image (where each slice is stored on an individual page) or a multi-color +staining picture (where each stain is stored on an individual page). 
Different +TIFF flavors/variants use series in different ways and, as such, the resulting +reading behavior may vary depending on the program used while creating a +particular TIFF file. + +Methods +------- +.. note:: + Check the respective function for a list of supported kwargs and detailed + documentation. + +.. autosummary:: + :toctree: + + TifffilePlugin.read + TifffilePlugin.iter + TifffilePlugin.write + TifffilePlugin.properties + TifffilePlugin.metadata + +Additional methods available inside the :func:`imopen ` +context: + +.. autosummary:: + :toctree: + + TifffilePlugin.iter_pages + +""" + +from io import BytesIO +from typing import Any, Dict, Optional, cast +import warnings + +import numpy as np +import tifffile + +from ..core.request import URI_BYTES, InitializationError, Request +from ..core.v3_plugin_api import ImageProperties, PluginV3 +from ..typing import ArrayLike + + +def _get_resolution(page: tifffile.TiffPage) -> Dict[str, Any]: + metadata = {} + + try: + metadata["resolution_unit"] = page.tags[296].value.value + except KeyError: + # tag 296 missing + return metadata + + try: + resolution_x = page.tags[282].value + resolution_y = page.tags[283].value + + metadata["resolution"] = ( + resolution_x[0] / resolution_x[1], + resolution_y[0] / resolution_y[1], + ) + except KeyError: + # tag 282 or 283 missing + pass + except ZeroDivisionError: + warnings.warn( + "Ignoring resolution metadata because at least one direction has a 0 " + "denominator.", + RuntimeWarning, + ) + + return metadata + + +class TifffilePlugin(PluginV3): + """Support for tifffile as backend. + + Parameters + ---------- + request : iio.Request + A request object that represents the users intent. It provides a + standard interface for a plugin to access the various ImageResources. + Check the docs for details. + kwargs : Any + Additional kwargs are forwarded to tifffile's constructor, i.e. + to ``TiffFile`` for reading or ``TiffWriter`` for writing. 
+ + """ + + def __init__(self, request: Request, **kwargs) -> None: + super().__init__(request) + self._fh = None + + if request.mode.io_mode == "r": + try: + self._fh = tifffile.TiffFile(request.get_file(), **kwargs) + except tifffile.tifffile.TiffFileError: + raise InitializationError("Tifffile can not read this file.") + else: + self._fh = tifffile.TiffWriter(request.get_file(), **kwargs) + + # --------------------- + # Standard V3 Interface + # --------------------- + + def read(self, *, index: int = None, page: int = None, **kwargs) -> np.ndarray: + """Read a ndimage or page. + + The ndimage returned depends on the value of both ``index`` and + ``page``. ``index`` selects the series to read and ``page`` allows + selecting a single page from the selected series. If ``index=None``, + ``page`` is understood as a flat index, i.e., the selection ignores + individual series inside the file. If both ``index`` and ``page`` are + ``None``, then all the series are read and returned as a batch. + + Parameters + ---------- + index : int + If ``int``, select the ndimage (series) located at that index inside + the file and return ``page`` from it. If ``None`` and ``page`` is + ``int`` read the page located at that (flat) index inside the file. + If ``None`` and ``page=None``, read all ndimages from the file and + return them as a batch. + page : int + If ``None`` return the full selected ndimage. If ``int``, read the + page at the selected index and return it. + kwargs : Any + Additional kwargs are forwarded to TiffFile's ``as_array`` method. + + Returns + ------- + ndarray : np.ndarray + The decoded ndimage or page. 
+ """ + + if "key" not in kwargs: + kwargs["key"] = page + elif page is not None: + raise ValueError("Can't use `page` and `key` at the same time.") + + # set plugin default for ``index`` + if index is not None and "series" in kwargs: + raise ValueError("Can't use `series` and `index` at the same time.") + elif "series" in kwargs: + index = kwargs.pop("series") + elif index is not None: + pass + else: + index = 0 + + if index is Ellipsis and page is None: + # read all series in the file and return them as a batch + ndimage = np.stack([x for x in self.iter(**kwargs)]) + else: + index = None if index is Ellipsis else index + ndimage = self._fh.asarray(series=index, **kwargs) + + return ndimage + + def iter(self, **kwargs) -> np.ndarray: + """Yield ndimages from the TIFF. + + Parameters + ---------- + kwargs : Any + Additional kwargs are forwarded to the TiffPageSeries' ``as_array`` + method. + + Yields + ------ + ndimage : np.ndarray + A decoded ndimage. + """ + + for sequence in self._fh.series: + yield sequence.asarray(**kwargs) + + def write( + self, ndimage: ArrayLike, *, is_batch: bool = False, **kwargs + ) -> Optional[bytes]: + """Save a ndimage as TIFF. + + Parameters + ---------- + ndimage : ArrayLike + The ndimage to encode and write to the ImageResource. + is_batch : bool + If True, the first dimension of the given ndimage is treated as a + batch dimension and each element will create a new series. + kwargs : Any + Additional kwargs are forwarded to TiffWriter's ``write`` method. + + Returns + ------- + encoded_image : bytes + If the ImageResource is ``""``, return the encoded bytes. + Otherwise write returns None. + + Notes + ----- + Incremental writing is supported. Subsequent calls to ``write`` will + create new series unless ``contiguous=True`` is used, in which case the + call to write will append to the current series. 
+ + """ + + if not is_batch: + ndimage = np.asarray(ndimage)[None, :] + + for image in ndimage: + self._fh.write(image, **kwargs) + + if self._request._uri_type == URI_BYTES: + self._fh.close() + file = cast(BytesIO, self._request.get_file()) + return file.getvalue() + + def metadata( + self, *, index: int = Ellipsis, page: int = None, exclude_applied: bool = True + ) -> Dict[str, Any]: + """Format-Specific TIFF metadata. + + The metadata returned depends on the value of both ``index`` and + ``page``. ``index`` selects a series and ``page`` allows selecting a + single page from the selected series. If ``index=Ellipsis``, ``page`` is + understood as a flat index, i.e., the selection ignores individual + series inside the file. If ``index=Ellipsis`` and ``page=None`` then + global (file-level) metadata is returned. + + Parameters + ---------- + index : int + Select the series of which to extract metadata from. If Ellipsis, treat + page as a flat index into the file's pages. + page : int + If not None, select the page of which to extract metadata from. If + None, read series-level metadata or, if ``index=...`` global, + file-level metadata. + exclude_applied : bool + For API compatibility. Currently ignored. + + Returns + ------- + metadata : dict + A dictionary with information regarding the tiff flavor (file-level) + or tiff tags (page-level). + """ + + if index is not Ellipsis and page is not None: + target = self._fh.series[index].pages[page] + elif index is not Ellipsis and page is None: + # This is based on my understanding that series-level metadata is + # stored in the first TIFF page. 
+ target = self._fh.series[index].pages[0] + elif index is Ellipsis and page is not None: + target = self._fh.pages[page] + else: + target = None + + metadata = {} + if target is None: + # return file-level metadata + metadata["byteorder"] = self._fh.byteorder + + for flag in tifffile.TIFF.FILE_FLAGS: + flag_value = getattr(self._fh, "is_" + flag) + metadata["is_" + flag] = flag_value + + if flag_value and hasattr(self._fh, flag + "_metadata"): + flavor_metadata = getattr(self._fh, flag + "_metadata") + if isinstance(flavor_metadata, tuple): + metadata.update(flavor_metadata[0]) + else: + metadata.update(flavor_metadata) + else: + # tifffile may return a TiffFrame instead of a page + target = target.keyframe + + metadata.update({tag.name: tag.value for tag in target.tags}) + metadata.update( + { + "planar_configuration": target.planarconfig, + "compression": target.compression, + "predictor": target.predictor, + "orientation": None, # TODO + "description1": target.description1, + "description": target.description, + "software": target.software, + **_get_resolution(target), + "datetime": target.datetime, + } + ) + + return metadata + + def properties(self, *, index: int = None, page: int = None) -> ImageProperties: + """Standardized metadata. + + The properties returned depend on the value of both ``index`` and + ``page``. ``index`` selects a series and ``page`` allows selecting a + single page from the selected series. If ``index=Ellipsis``, ``page`` is + understood as a flat index, i.e., the selection ignores individual + series inside the file. If ``index=Ellipsis`` and ``page=None`` then + global (file-level) properties are returned. If ``index=Ellipsis`` + and ``page=...``, file-level properties for the flattened index are + returned. + + Parameters + ---------- + index : int + If ``int``, select the ndimage (series) located at that index inside + the file. 
If ``Ellipsis`` and ``page`` is ``int`` extract the + properties of the page located at that (flat) index inside the file. + If ``Ellipsis`` and ``page=None``, return the properties for the + batch of all ndimages in the file. + page : int + If ``None`` return the properties of the full ndimage. If ``...`` + return the properties of the flattened index. If ``int``, + return the properties of the page at the selected index only. + + Returns + ------- + image_properties : ImageProperties + The standardized metadata (properties) of the selected ndimage or series. + + """ + index = index or 0 + page_idx = 0 if page in (None, Ellipsis) else page + + if index is Ellipsis: + target_page = self._fh.pages[page_idx] + else: + target_page = self._fh.series[index].pages[page_idx] + + if index is Ellipsis and page is None: + n_series = len(self._fh.series) + props = ImageProperties( + shape=(n_series, *target_page.shape), + dtype=target_page.dtype, + n_images=n_series, + is_batch=True, + spacing=_get_resolution(target_page).get("resolution"), + ) + elif index is Ellipsis and page is Ellipsis: + n_pages = len(self._fh.pages) + props = ImageProperties( + shape=(n_pages, *target_page.shape), + dtype=target_page.dtype, + n_images=n_pages, + is_batch=True, + spacing=_get_resolution(target_page).get("resolution"), + ) + else: + props = ImageProperties( + shape=target_page.shape, + dtype=target_page.dtype, + is_batch=False, + spacing=_get_resolution(target_page).get("resolution"), + ) + + return props + + def close(self) -> None: + if self._fh is not None: + self._fh.close() + + super().close() + + # ------------------------------ + # Add-on Interface inside imopen + # ------------------------------ + + def iter_pages(self, index=..., **kwargs): + """Yield pages from a TIFF file. + + This generator walks over the flat index of the pages inside an + ImageResource and yields them in order. + + Parameters + ---------- + index : int + The index of the series to yield pages from. 
If Ellipsis, walk over + the file's flat index (and ignore individual series). + kwargs : Any + Additional kwargs are passed to TiffPage's ``as_array`` method. + + Yields + ------ + page : np.ndarray + A page stored inside the TIFF file. + + """ + + if index is Ellipsis: + pages = self._fh.pages + else: + pages = self._fh.series[index] + + for page in pages: + yield page.asarray(**kwargs) diff --git a/parrot/lib/python3.10/site-packages/imageio/py.typed b/parrot/lib/python3.10/site-packages/imageio/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/imageio/testing.py b/parrot/lib/python3.10/site-packages/imageio/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..535b1386d92d5f452826d3d05f7e93059607a759 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/testing.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# Distributed under the (new) BSD License. See LICENSE.txt for more info. + +""" Functionality used for testing. This code itself is not covered in tests. +""" + +import os +import sys +import pytest + +# Get root dir +THIS_DIR = os.path.abspath(os.path.dirname(__file__)) +ROOT_DIR = THIS_DIR +for i in range(9): + ROOT_DIR = os.path.dirname(ROOT_DIR) + if os.path.isfile(os.path.join(ROOT_DIR, ".gitignore")): + break + + +# Functions to use from invoke tasks + + +def test_unit(cov_report="term"): + """Run all unit tests. Returns exit code.""" + orig_dir = os.getcwd() + os.chdir(ROOT_DIR) + try: + _clear_imageio() + _enable_faulthandler() + return pytest.main( + [ + "-v", + "--cov", + "imageio", + "--cov-config", + ".coveragerc", + "--cov-report", + cov_report, + "tests", + ] + ) + finally: + os.chdir(orig_dir) + import imageio + + print("Tests were performed on", str(imageio)) + + +# Requirements + + +def _enable_faulthandler(): + """Enable faulthandler (if we can), so that we get tracebacks + on segfaults. 
+ """ + try: + import faulthandler + + faulthandler.enable() + print("Faulthandler enabled") + except Exception: + print("Could not enable faulthandler") + + +def _clear_imageio(): + # Remove ourselves from sys.modules to force an import + for key in list(sys.modules.keys()): + if key.startswith("imageio"): + del sys.modules[key] diff --git a/parrot/lib/python3.10/site-packages/imageio/typing.py b/parrot/lib/python3.10/site-packages/imageio/typing.py new file mode 100644 index 0000000000000000000000000000000000000000..1e97d5b1d8079059a2e3b1f4674b878e17534f24 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/typing.py @@ -0,0 +1,17 @@ +from io import BytesIO +from typing import Union, BinaryIO +from pathlib import Path + +try: + from numpy.typing import ArrayLike +except ImportError: + # numpy<1.20 fall back to using ndarray + from numpy import ndarray as ArrayLike + +ImageResource = Union[str, bytes, BytesIO, Path, BinaryIO] + + +__all__ = [ + "ArrayLike", + "ImageResource", +] diff --git a/parrot/lib/python3.10/site-packages/imageio/v2.py b/parrot/lib/python3.10/site-packages/imageio/v2.py new file mode 100644 index 0000000000000000000000000000000000000000..db2963b7cb3a634bbb9d25ddbb0f64f95dd9391a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/v2.py @@ -0,0 +1,676 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +import re +import warnings +from numbers import Number +from pathlib import Path +from typing import Dict + +import numpy as np + +from imageio.core.legacy_plugin_wrapper import LegacyPlugin +from imageio.core.util import Array +from imageio.core.v3_plugin_api import PluginV3 + +from . 
import formats +from .config import known_extensions, known_plugins +from .core import RETURN_BYTES +from .core.imopen import imopen + +MEMTEST_DEFAULT_MIM = "256MB" +MEMTEST_DEFAULT_MVOL = "1GB" + + +mem_re = re.compile(r"^(\d+\.?\d*)\s*([kKMGTPEZY]?i?)B?$") +sizes = {"": 1, None: 1} +for i, si in enumerate([""] + list("kMGTPEZY")): + sizes[si] = 1000**i + if si: + sizes[si.upper() + "i"] = 1024**i + + +def to_nbytes(arg, default=None): + if not arg: + arg = float("inf") + + if arg is True: + arg = default + + if isinstance(arg, Number): + return arg + + match = mem_re.match(arg) + if match is None: + raise ValueError( + "Memory size could not be parsed " + "(is your capitalisation correct?): {}".format(arg) + ) + + num, unit = match.groups() + + try: + return float(num) * sizes[unit] + except KeyError: # pragma: no cover + # Note: I don't think we can reach this + raise ValueError( + "Memory size unit not recognised " + "(is your capitalisation correct?): {}".format(unit) + ) + + +def help(name=None): + """help(name=None) + + Print the documentation of the format specified by name, or a list + of supported formats if name is omitted. + + Parameters + ---------- + name : str + Can be the name of a format, a filename extension, or a full + filename. See also the :doc:`formats page <../formats/index>`. + """ + if not name: + print(formats) + else: + print(formats[name]) + + +def decypher_format_arg(format_name: str) -> Dict[str, str]: + """Split format into plugin and format + + The V2 API aliases plugins and supported formats. This function + splits these so that they can be fed separately to `iio.imopen`. 
+ + """ + + plugin = None + extension = None + + if format_name is None: + pass # nothing to do + elif Path(format_name).suffix.lower() in known_extensions: + extension = Path(format_name).suffix.lower() + elif format_name in known_plugins: + plugin = format_name + elif format_name.upper() in known_plugins: + plugin = format_name.upper() + elif format_name.lower() in known_extensions: + extension = format_name.lower() + elif "." + format_name.lower() in known_extensions: + extension = "." + format_name.lower() + else: + raise IndexError(f"No format known by name `{plugin}`.") + + return {"plugin": plugin, "extension": extension} + + +class LegacyReader: + def __init__(self, plugin_instance: PluginV3, **kwargs): + self.instance = plugin_instance + self.last_index = 0 + self.closed = False + + if ( + type(self.instance).__name__ == "PillowPlugin" + and kwargs.get("pilmode") is not None + ): + kwargs["mode"] = kwargs["pilmode"] + del kwargs["pilmode"] + + self.read_args = kwargs + + def close(self): + if not self.closed: + self.instance.close() + self.closed = True + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def __del__(self): + self.close() + + @property + def request(self): + return self.instance.request + + @property + def format(self): + raise TypeError("V3 Plugins don't have a format.") + + def get_length(self): + return self.instance.properties(index=...).n_images + + def get_data(self, index): + self.last_index = index + img = self.instance.read(index=index, **self.read_args) + metadata = self.instance.metadata(index=index, exclude_applied=False) + return Array(img, metadata) + + def get_next_data(self): + return self.get_data(self.last_index + 1) + + def set_image_index(self, index): + self.last_index = index - 1 + + def get_meta_data(self, index=None): + return self.instance.metadata(index=index, exclude_applied=False) + + def iter_data(self): + for idx, img in enumerate(self.instance.iter()): + 
metadata = self.instance.metadata(index=idx, exclude_applied=False) + yield Array(img, metadata) + + def __iter__(self): + return self.iter_data() + + def __len__(self): + return self.get_length() + + +class LegacyWriter: + def __init__(self, plugin_instance: PluginV3, **kwargs): + self.instance = plugin_instance + self.last_index = 0 + self.closed = False + + if type(self.instance).__name__ == "PillowPlugin" and "pilmode" in kwargs: + kwargs["mode"] = kwargs["pilmode"] + del kwargs["pilmode"] + + self.write_args = kwargs + + def close(self): + if not self.closed: + self.instance.close() + self.closed = True + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def __del__(self): + self.close() + + @property + def request(self): + return self.instance.request + + @property + def format(self): + raise TypeError("V3 Plugins don't have a format.") + + def append_data(self, im, meta=None): + # TODO: write metadata in the future; there is currently no + # generic way to do this with v3 plugins :( + if meta is not None: + warnings.warn( + "V3 Plugins currently don't have a uniform way to" + " write metadata, so any metadata is ignored." + ) + + # total_meta = dict() + # if meta is None: + # meta = {} + # if hasattr(im, "meta") and isinstance(im.meta, dict): + # total_meta.update(im.meta) + # total_meta.update(meta) + + return self.instance.write(im, **self.write_args) + + def set_meta_data(self, meta): + # TODO: write metadata + raise NotImplementedError( + "V3 Plugins don't have a uniform way to write metadata (yet)." 
+ ) + + +def is_batch(ndimage): + if isinstance(ndimage, (list, tuple)): + return True + + ndimage = np.asarray(ndimage) + if ndimage.ndim <= 2: + return False + elif ndimage.ndim == 3 and ndimage.shape[2] < 5: + return False + + return True + + +def is_volume(ndimage): + ndimage = np.asarray(ndimage) + if not is_batch(ndimage): + return False + + if ndimage.ndim == 3 and ndimage.shape[2] >= 5: + return True + elif ndimage.ndim == 4 and ndimage.shape[3] < 5: + return True + else: + return False + + +# Base functions that return a reader/writer + + +def get_reader(uri, format=None, mode="?", **kwargs): + """get_reader(uri, format=None, mode='?', **kwargs) + + Returns a :class:`.Reader` object which can be used to read data + and meta data from the specified file. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the image from, e.g. a filename, pathlib.Path, + http address or file object, see the docs for more info. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + mode : {'i', 'I', 'v', 'V', '?'} + Used to give the reader a hint on what the user expects (default "?"): + "i" for an image, "I" for multiple images, "v" for a volume, + "V" for multiple volumes, "?" for don't care. + kwargs : ... + Further keyword arguments are passed to the reader. See :func:`.help` + to see what arguments are available for a particular format. + """ + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + + image_file = imopen(uri, "r" + mode, **imopen_args) + + if isinstance(image_file, LegacyPlugin): + return image_file.legacy_get_reader(**kwargs) + else: + return LegacyReader(image_file, **kwargs) + + +def get_writer(uri, format=None, mode="?", **kwargs): + """get_writer(uri, format=None, mode='?', **kwargs) + + Returns a :class:`.Writer` object which can be used to write data + and meta data to the specified file. 
+ + Parameters + ---------- + uri : {str, pathlib.Path, file} + The resource to write the image to, e.g. a filename, pathlib.Path + or file object, see the docs for more info. + format : str + The format to use to write the file. By default imageio selects + the appropriate for you based on the filename. + mode : {'i', 'I', 'v', 'V', '?'} + Used to give the writer a hint on what the user expects (default '?'): + "i" for an image, "I" for multiple images, "v" for a volume, + "V" for multiple volumes, "?" for don't care. + kwargs : ... + Further keyword arguments are passed to the writer. See :func:`.help` + to see what arguments are available for a particular format. + """ + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + + image_file = imopen(uri, "w" + mode, **imopen_args) + if isinstance(image_file, LegacyPlugin): + return image_file.legacy_get_writer(**kwargs) + else: + return LegacyWriter(image_file, **kwargs) + + +# Images + + +def imread(uri, format=None, **kwargs): + """imread(uri, format=None, **kwargs) + + Reads an image from the specified file. Returns a numpy array, which + comes with a dict of meta data at its 'meta' attribute. + + Note that the image data is returned as-is, and may not always have + a dtype of uint8 (and thus may differ from what e.g. PIL returns). + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the image from, e.g. a filename, pathlib.Path, + http address or file object, see the docs for more info. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the reader. See :func:`.help` + to see what arguments are available for a particular format. 
+ """ + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + + with imopen(uri, "ri", **imopen_args) as file: + result = file.read(index=0, **kwargs) + + return result + + +def imwrite(uri, im, format=None, **kwargs): + """imwrite(uri, im, format=None, **kwargs) + + Write an image to the specified file. + + Parameters + ---------- + uri : {str, pathlib.Path, file} + The resource to write the image to, e.g. a filename, pathlib.Path + or file object, see the docs for more info. + im : numpy.ndarray + The image data. Must be NxM, NxMx3 or NxMx4. + format : str + The format to use to write the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the writer. See :func:`.help` + to see what arguments are available for a particular format. + """ + + # Test image + imt = type(im) + im = np.asarray(im) + if not np.issubdtype(im.dtype, np.number): + raise ValueError("Image is not numeric, but {}.".format(imt.__name__)) + + if is_batch(im) or im.ndim < 2: + raise ValueError("Image must be 2D (grayscale, RGB, or RGBA).") + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + with imopen(uri, "wi", **imopen_args) as file: + return file.write(im, **kwargs) + + +# Multiple images + + +def mimread(uri, format=None, memtest=MEMTEST_DEFAULT_MIM, **kwargs): + """mimread(uri, format=None, memtest="256MB", **kwargs) + + Reads multiple images from the specified file. Returns a list of + numpy arrays, each with a dict of meta data at its 'meta' attribute. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the images from, e.g. a filename,pathlib.Path, + http address or file object, see the docs for more info. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. 
+ memtest : {bool, int, float, str} + If truthy, this function will raise an error if the resulting + list of images consumes greater than the amount of memory specified. + This is to protect the system from using so much memory that it needs + to resort to swapping, and thereby stall the computer. E.g. + ``mimread('hunger_games.avi')``. + + If the argument is a number, that will be used as the threshold number + of bytes. + + If the argument is a string, it will be interpreted as a number of bytes with + SI/IEC prefixed units (e.g. '1kB', '250MiB', '80.3YB'). + + - Units are case sensitive + - k, M etc. represent a 1000-fold change, where Ki, Mi etc. represent 1024-fold + - The "B" is optional, but if present, must be capitalised + + If the argument is True, the default will be used, for compatibility reasons. + + Default: '256MB' + kwargs : ... + Further keyword arguments are passed to the reader. See :func:`.help` + to see what arguments are available for a particular format. + """ + + # used for mimread and mvolread + nbyte_limit = to_nbytes(memtest, MEMTEST_DEFAULT_MIM) + + images = list() + nbytes = 0 + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + with imopen(uri, "rI", **imopen_args) as file: + for image in file.iter(**kwargs): + images.append(image) + nbytes += image.nbytes + if nbytes > nbyte_limit: + raise RuntimeError( + "imageio.mimread() has read over {}B of " + "image data.\nStopped to avoid memory problems." + " Use imageio.get_reader(), increase threshold, or memtest=False".format( + int(nbyte_limit) + ) + ) + + if len(images) == 1 and is_batch(images[0]): + images = [*images[0]] + + return images + + +def mimwrite(uri, ims, format=None, **kwargs): + """mimwrite(uri, ims, format=None, **kwargs) + + Write multiple images to the specified file. + + Parameters + ---------- + uri : {str, pathlib.Path, file} + The resource to write the images to, e.g. 
a filename, pathlib.Path + or file object, see the docs for more info. + ims : sequence of numpy arrays + The image data. Each array must be NxM, NxMx3 or NxMx4. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the writer. See :func:`.help` + to see what arguments are available for a particular format. + """ + + if not is_batch(ims): + raise ValueError("Image data must be a sequence of ndimages.") + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + with imopen(uri, "wI", **imopen_args) as file: + return file.write(ims, is_batch=True, **kwargs) + + +# Volumes + + +def volread(uri, format=None, **kwargs): + """volread(uri, format=None, **kwargs) + + Reads a volume from the specified file. Returns a numpy array, which + comes with a dict of meta data at its 'meta' attribute. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the volume from, e.g. a filename, pathlib.Path, + http address or file object, see the docs for more info. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the reader. See :func:`.help` + to see what arguments are available for a particular format. + """ + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + with imopen(uri, "rv", **imopen_args) as file: + return file.read(index=0, **kwargs) + + +def volwrite(uri, im, format=None, **kwargs): + """volwrite(uri, vol, format=None, **kwargs) + + Write a volume to the specified file. + + Parameters + ---------- + uri : {str, pathlib.Path, file} + The resource to write the image to, e.g. a filename, pathlib.Path + or file object, see the docs for more info. + vol : numpy.ndarray + The image data. 
Must be NxMxL (or NxMxLxK if each voxel is a tuple). + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the writer. See :func:`.help` + to see what arguments are available for a particular format. + """ + + # Test image + im = np.asarray(im) + if not is_volume(im): + raise ValueError("Image must be 3D, or 4D if each voxel is a tuple.") + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + + with imopen(uri, "wv", **imopen_args) as file: + return file.write(im, is_batch=False, **kwargs) + + +# Multiple volumes + + +def mvolread(uri, format=None, memtest=MEMTEST_DEFAULT_MVOL, **kwargs): + """mvolread(uri, format=None, memtest='1GB', **kwargs) + + Reads multiple volumes from the specified file. Returns a list of + numpy arrays, each with a dict of meta data at its 'meta' attribute. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the volumes from, e.g. a filename, pathlib.Path, + http address or file object, see the docs for more info. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + memtest : {bool, int, float, str} + If truthy, this function will raise an error if the resulting + list of images consumes greater than the amount of memory specified. + This is to protect the system from using so much memory that it needs + to resort to swapping, and thereby stall the computer. E.g. + ``mimread('hunger_games.avi')``. + + If the argument is a number, that will be used as the threshold number + of bytes. + + If the argument is a string, it will be interpreted as a number of bytes with + SI/IEC prefixed units (e.g. '1kB', '250MiB', '80.3YB'). + + - Units are case sensitive + - k, M etc. represent a 1000-fold change, where Ki, Mi etc. 
represent 1024-fold + - The "B" is optional, but if present, must be capitalised + + If the argument is True, the default will be used, for compatibility reasons. + + Default: '1GB' + kwargs : ... + Further keyword arguments are passed to the reader. See :func:`.help` + to see what arguments are available for a particular format. + """ + + # used for mimread and mvolread + nbyte_limit = to_nbytes(memtest, MEMTEST_DEFAULT_MVOL) + + images = list() + nbytes = 0 + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + with imopen(uri, "rV", **imopen_args) as file: + for image in file.iter(**kwargs): + images.append(image) + nbytes += image.nbytes + if nbytes > nbyte_limit: + raise RuntimeError( + "imageio.mimread() has read over {}B of " + "image data.\nStopped to avoid memory problems." + " Use imageio.get_reader(), increase threshold, or memtest=False".format( + int(nbyte_limit) + ) + ) + + return images + + +def mvolwrite(uri, ims, format=None, **kwargs): + """mvolwrite(uri, vols, format=None, **kwargs) + + Write multiple volumes to the specified file. + + Parameters + ---------- + uri : {str, pathlib.Path, file} + The resource to write the volumes to, e.g. a filename, pathlib.Path + or file object, see the docs for more info. + ims : sequence of numpy arrays + The image data. Each array must be NxMxL (or NxMxLxK if each + voxel is a tuple). + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the writer. See :func:`.help` + to see what arguments are available for a particular format. 
+ """ + + for im in ims: + if not is_volume(im): + raise ValueError("Image must be 3D, or 4D if each voxel is a tuple.") + + imopen_args = decypher_format_arg(format) + imopen_args["legacy_mode"] = True + with imopen(uri, "wV", **imopen_args) as file: + return file.write(ims, is_batch=True, **kwargs) + + +# aliases +read = get_reader +save = get_writer +imsave = imwrite +mimsave = mimwrite +volsave = volwrite +mvolsave = mvolwrite + +__all__ = [ + "imread", + "mimread", + "volread", + "mvolread", + "imwrite", + "mimwrite", + "volwrite", + "mvolwrite", + # misc + "help", + "get_reader", + "get_writer", + "RETURN_BYTES", +] diff --git a/parrot/lib/python3.10/site-packages/imageio/v3.py b/parrot/lib/python3.10/site-packages/imageio/v3.py new file mode 100644 index 0000000000000000000000000000000000000000..65d36e57fbf6f935981cbeeb710f078df8872206 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/v3.py @@ -0,0 +1,259 @@ +import numpy as np + +from .core.imopen import imopen + + +def imread(uri, *, index=None, plugin=None, extension=None, format_hint=None, **kwargs): + """Read an ndimage from a URI. + + Opens the given URI and reads an ndimage from it. The exact behavior + depends on both the file type and plugin used to open the file. To learn + about the exact behavior, check the documentation of the relevant plugin. + Typically, imread attempts to read all data stored in the URI. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the image from, e.g. a filename, pathlib.Path, + http address or file object, see the docs for more info. + index : {int, Ellipsis, None} + If the ImageResource contains multiple ndimages, and index is an + integer, select the index-th ndimage from among them and return it. If + index is an ellipsis (...), read all ndimages in the file and stack them + along a new batch dimension. If index is None, let the plugin decide. + plugin : {str, None} + The plugin to use. 
If set to None (default) imread will perform a + search for a matching plugin. If not None, this takes priority over + the provided format hint (if present). + extension : str + If not None, treat the provided ImageResource as if it had the given + extension. This affects the order in which backends are considered. + format_hint : str + Deprecated. Use `extension` instead. + **kwargs : + Additional keyword arguments will be passed to the plugin's read call. + + Returns + ------- + image : ndimage + The ndimage located at the given URI. + """ + + plugin_kwargs = { + "legacy_mode": False, + "plugin": plugin, + "format_hint": format_hint, + "extension": extension, + } + + call_kwargs = kwargs + if index is not None: + call_kwargs["index"] = index + + with imopen(uri, "r", **plugin_kwargs) as img_file: + return np.asarray(img_file.read(**call_kwargs)) + + +def imiter(uri, *, plugin=None, extension=None, format_hint=None, **kwargs): + """Read a sequence of ndimages from a URI. + + Returns an iterable that yields ndimages from the given URI. The exact + behavior depends on both, the file type and plugin used to open the file. + To learn about the exact behavior, check the documentation of the relevant + plugin. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the image from, e.g. a filename, pathlib.Path, + http address or file object, see the docs for more info. + plugin : {str, None} + The plugin to use. If set to None (default) imiter will perform a + search for a matching plugin. If not None, this takes priority over + the provided format hint (if present). + extension : str + If not None, treat the provided ImageResource as if it had the given + extension. This affects the order in which backends are considered. + format_hint : str + Deprecated. Use `extension` instead. + **kwargs : + Additional keyword arguments will be passed to the plugin's ``iter`` + call. 
+ + Yields + ------ + image : ndimage + The next ndimage located at the given URI. + + """ + + with imopen( + uri, + "r", + legacy_mode=False, + plugin=plugin, + format_hint=format_hint, + extension=extension, + ) as img_file: + for image in img_file.iter(**kwargs): + # Note: casting to ndarray here to ensure compatibility + # with the v2.9 API + yield np.asarray(image) + + +def imwrite(uri, image, *, plugin=None, extension=None, format_hint=None, **kwargs): + """Write an ndimage to the given URI. + + The exact behavior depends on the file type and plugin used. To learn about + the exact behavior, check the documentation of the relevant plugin. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to save the image to, e.g. a filename, pathlib.Path, + http address or file object, check the docs for more info. + image : np.ndarray + The image to write to disk. + plugin : {str, None} + The plugin to use. If set to None (default) imwrite will perform a + search for a matching plugin. If not None, this takes priority over + the provided format hint (if present). + extension : str + If not None, treat the provided ImageResource as if it had the given + extension. This affects the order in which backends are considered, and + may also influence the format used when encoding. + format_hint : str + Deprecated. Use `extension` instead. + **kwargs : + Additional keyword arguments will be passed to the plugin's ``write`` + call. + + Returns + ------- + encoded_image : None or Bytes + Returns ``None`` in all cases, except when ``uri`` is set to ````. + In this case it returns the encoded ndimage as a bytes string. + + """ + + with imopen( + uri, + "w", + legacy_mode=False, + plugin=plugin, + format_hint=format_hint, + extension=extension, + ) as img_file: + encoded = img_file.write(image, **kwargs) + + return encoded + + +def improps(uri, *, index=None, plugin=None, extension=None, **kwargs): + """Read standardized metadata. 
+ + Opens the given URI and reads the properties of an ndimage from it. The + properties represent standardized metadata. This means that they will have + the same name regardless of the format being read or plugin/backend being + used. Further, any field will be, where possible, populated with a sensible + default (may be `None`) if the ImageResource does not declare a value in its + metadata. + + Parameters + ---------- + index : int + If the ImageResource contains multiple ndimages, and index is an + integer, select the index-th ndimage from among them and return its + properties. If index is an ellipsis (...), read all ndimages in the file + and stack them along a new batch dimension and return their properties. + If index is None, let the plugin decide. + plugin : {str, None} + The plugin to be used. If None, performs a search for a matching + plugin. + extension : str + If not None, treat the provided ImageResource as if it had the given + extension. This affects the order in which backends are considered. + **kwargs : + Additional keyword arguments will be passed to the plugin's ``properties`` + call. + + Returns + ------- + properties : ImageProperties + A dataclass filled with standardized image metadata. + + Notes + ----- + Where possible, this will avoid loading pixel data. + + See Also + -------- + imageio.core.v3_plugin_api.ImageProperties + + """ + + plugin_kwargs = {"legacy_mode": False, "plugin": plugin, "extension": extension} + + call_kwargs = kwargs + if index is not None: + call_kwargs["index"] = index + + with imopen(uri, "r", **plugin_kwargs) as img_file: + properties = img_file.properties(**call_kwargs) + + return properties + + +def immeta( + uri, *, index=None, plugin=None, extension=None, exclude_applied=True, **kwargs +): + """Read format-specific metadata. + + Opens the given URI and reads metadata for an ndimage from it. 
The contents + of the returned metadata dictionary is specific to both the image format and + plugin used to open the ImageResource. To learn about the exact behavior, + check the documentation of the relevant plugin. Typically, immeta returns a + dictionary specific to the image format, where keys match metadata field + names and values are a field's contents. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the image from, e.g. a filename, pathlib.Path, http + address or file object, see the docs for more info. + index : {int, None} + If the ImageResource contains multiple ndimages, and index is an + integer, select the index-th ndimage from among them and return its + metadata. If index is an ellipsis (...), return global metadata. If + index is None, let the plugin decide the default. + plugin : {str, None} + The plugin to be used. If None (default), performs a search for a + matching plugin. + extension : str + If not None, treat the provided ImageResource as if it had the given + extension. This affects the order in which backends are considered. + **kwargs : + Additional keyword arguments will be passed to the plugin's metadata + method. + + Returns + ------- + image : ndimage + The ndimage located at the given URI. 
+ + """ + + plugin_kwargs = {"legacy_mode": False, "plugin": plugin, "extension": extension} + + call_kwargs = kwargs + call_kwargs["exclude_applied"] = exclude_applied + if index is not None: + call_kwargs["index"] = index + + with imopen(uri, "r", **plugin_kwargs) as img_file: + metadata = img_file.metadata(**call_kwargs) + + return metadata + + +__all__ = ["imopen", "imread", "imwrite", "imiter", "improps", "immeta"] diff --git a/parrot/lib/python3.10/site-packages/imageio/v3.pyi b/parrot/lib/python3.10/site-packages/imageio/v3.pyi new file mode 100644 index 0000000000000000000000000000000000000000..339e33e12f67af086b671ccf4f2e638ae914799e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/imageio/v3.pyi @@ -0,0 +1,62 @@ +from typing import Any, Dict, Iterator, List, Literal, Optional, Union, overload + +import numpy as np + +from .core.imopen import imopen as imopen +from .core.v3_plugin_api import ImageProperties +from .typing import ArrayLike, ImageResource + +def imread( + uri: ImageResource, + *, + index: Optional[int] = 0, + plugin: str = None, + extension: str = None, + format_hint: str = None, + **kwargs +) -> np.ndarray: ... +def imiter( + uri: ImageResource, + *, + plugin: str = None, + extension: str = None, + format_hint: str = None, + **kwargs +) -> Iterator[np.ndarray]: ... +@overload +def imwrite( + uri: Literal[""], + image: Union[ArrayLike, List[ArrayLike]], + *, + plugin: str = None, + extension: str = None, + format_hint: str = None, + **kwargs +) -> bytes: ... +@overload +def imwrite( + uri: ImageResource, + image: Union[ArrayLike, List[ArrayLike]], + *, + plugin: str = None, + extension: str = None, + format_hint: str = None, + **kwargs +) -> None: ... +def improps( + uri, + *, + index: Optional[int] = 0, + plugin: str = None, + extension: str = None, + **kwargs +) -> ImageProperties: ... 
+def immeta( + uri, + *, + index: Optional[int] = 0, + plugin: str = None, + extension: str = None, + exclude_applied: bool = True, + **kwargs +) -> Dict[str, Any]: ... diff --git a/parrot/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/INSTALLER b/parrot/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/parrot/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/WHEEL b/parrot/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..06e355fe0e3ed7077903f119ae6928a17da8eb6f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-manylinux1_x86_64 + diff --git a/parrot/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/top_level.txt b/parrot/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/nvidia_cuda_runtime_cu12-12.1.105.dist-info/top_level.txt @@ -0,0 +1 @@ +nvidia diff --git a/parrot/lib/python3.10/site-packages/tiktoken-0.7.0.dist-info/top_level.txt b/parrot/lib/python3.10/site-packages/tiktoken-0.7.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..859880ea40d957171e49537207249fdca13946cb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/tiktoken-0.7.0.dist-info/top_level.txt @@ -0,0 +1,2 @@ +tiktoken +tiktoken_ext